diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml index 3d3aa4bd77..af1b3ce0fe 100644 --- a/.github/workflows/benchmark-prs.yml +++ b/.github/workflows/benchmark-prs.yml @@ -9,386 +9,386 @@ env: NODE_DATA_PATH: /home/runner/.local/share/safe/node jobs: - benchmark-cli: - name: Compare sn_cli benchmarks to main - # right now only ubuntu, running on multiple systems would require many pushes...\ - # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing - # once to the branch.. - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt, clippy - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - ######################## - ### Setup ### - ######################## - - run: cargo install cargo-criterion - - - name: install ripgrep - run: sudo apt-get -y install ripgrep - - - name: Download 95mb file to be uploaded with the safe client - shell: bash - run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - # As normal user won't care much about initial client startup, - # but be more alerted on communication speed during transmission. - # Meanwhile the criterion testing code includes the client startup as well, - # it will be better to execute bench test with `local-discovery`, - # to make the measurement results reflect speed improvement or regression more accurately. 
- - name: Build sn bins - run: cargo build --release --bin safe --bin safenode --features local-discovery - timeout-minutes: 30 - - - name: Build faucet bin - run: cargo build --release --bin faucet --features local-discovery --features gifting --no-default-features - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - env: - SN_LOG: "all" - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ubuntu-latest - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - - ######################### - ### Upload large file ### - ######################### - - - name: Fund cli wallet - shell: bash - run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000 - env: - SN_LOG: "all" - - - name: Start a client instance to compare memory usage - shell: bash - run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick - env: - SN_LOG: "all" - - - name: Cleanup uploaded_files folder to avoid pollute download benchmark - shell: bash - run: rm -rf $CLIENT_DATA_PATH/uploaded_files - - ########################### - ### Client Mem Analysis ### - ########################### - - - name: Check client memory usage - shell: bash - run: | - client_peak_mem_limit_mb="1024" # mb - client_avg_mem_limit_mb="512" # mb - - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - echo "Peak memory usage: $peak_mem_usage MB" - if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then - echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" - exit 1 - fi - - total_mem=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o 
--no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' - ) - num_of_times=$( - rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "num_of_times: $num_of_times" - echo "Total memory is: $total_mem" - average_mem=$(($total_mem/$(($num_of_times)))) - echo "Average memory is: $average_mem" - - if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then - echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" - exit 1 - fi - # Write the client memory usage to a file - echo '[ - { - "name": "client-peak-memory-usage-during-upload", - "value": '$peak_mem_usage', - "unit": "MB" - }, - { - "name": "client-average-memory-usage-during-upload", - "value": '$average_mem', - "unit": "MB" - } - ]' > client_memory_usage.json - - - name: check client_memory_usage.json - shell: bash - run: cat client_memory_usage.json - - - name: Alert for client memory usage - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "Memory Usage of Client during uploading large file" - tool: "customSmallerIsBetter" - output-file-path: client_memory_usage.json - # Where the previous data file is stored - external-data-json-path: ./cache/client-mem-usage.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true - - ######################## - ### Benchmark ### - ######################## - - name: Bench `safe` cli - shell: bash - # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, - # passes to tee which displays it in the terminal and writes to output.txt - run: | - cargo criterion --features=local-discovery 
--message-format=json 2>&1 -p sn_cli | tee -a output.txt - cat output.txt | rg benchmark-complete | jq -s 'map({ - name: (.id | split("/"))[-1], - unit: "MiB/s", - value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) - })' > files-benchmark.json - timeout-minutes: 15 - - - name: Confirming the number of files uploaded and downloaded during the benchmark test - shell: bash - run: | - ls -l $CLIENT_DATA_PATH - ls -l $CLIENT_DATA_PATH/uploaded_files - ls -l $CLIENT_DATA_PATH/safe_files - - - name: Store benchmark result - uses: benchmark-action/github-action-benchmark@v1 - with: - # What benchmark tool the output.txt came from - tool: "customBiggerIsBetter" - output-file-path: files-benchmark.json - # Where the previous data file is stored - external-data-json-path: ./cache/benchmark-data.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true - - - name: Start a client to carry out download to output the logs - shell: bash - run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - - - name: Start a client to simulate criterion upload - shell: bash - run: | - ls -l target/release - target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick - - ######################### - ### Stop Network ### - ######################### - - - name: Stop the local network - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_benchmark - platform: ubuntu-latest - build: true - - - name: Upload Faucet folder - uses: actions/upload-artifact@main 
- with: - name: faucet_folder - path: /home/runner/.local/share/safe/test_faucet - continue-on-error: true - if: always() - - ######################### - ### Node Mem Analysis ### - ######################### - - # The large file uploaded will increase node's peak mem usage a lot - - name: Check node memory usage - shell: bash - run: | - node_peak_mem_limit_mb="250" # mb - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - - echo "Memory usage: $peak_mem_usage MB" - if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then - echo "Node memory usage exceeded threshold: $peak_mem_usage MB" - exit 1 - fi - # Write the node memory usage to a file - echo '[ - { - "name": "node-memory-usage-through-safe-benchmark", - "value": '$peak_mem_usage', - "unit": "MB" - } - ]' > node_memory_usage.json - - - name: check node_memory_usage.json - shell: bash - run: cat node_memory_usage.json - - - name: Alert for node memory usage - uses: benchmark-action/github-action-benchmark@v1 - with: - tool: "customSmallerIsBetter" - output-file-path: node_memory_usage.json - # Where the previous data file is stored - external-data-json-path: ./cache/node-mem-usage.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # Comment on the PR - comment-always: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true - - ########################################### - ### Swarm_driver handling time Analysis ### - ########################################### - - - name: Check swarm_driver handling time - shell: bash - run: | - num_of_times=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c 
--stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long cmd handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total cmd long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average cmd long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms)) - total_num_of_times=$(($num_of_times)) - num_of_times=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long event handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total event long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average event long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms+$total_long_handling)) - total_num_of_times=$(($num_of_times+$total_num_of_times)) - average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) - echo "Total swarm_driver long handling times is: $total_num_of_times" - echo "Total swarm_driver long handling duration is: $total_long_handling ms" - echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - total_num_of_times_limit_hits="30000" # hits - total_long_handling_limit_ms="400000" # ms - average_handling_limit_ms="20" # ms - if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then - echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" - exit 1 - fi - 
if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then - echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" - exit 1 - fi - if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then - echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" - exit 1 - fi - - # Write the node memory usage to a file - echo '[ - { - "name": "swarm_driver long handling times", - "value": '$total_num_of_times', - "unit": "hits" - }, - { - "name": "swarm_driver long handling total_time", - "value": '$total_long_handling', - "unit": "ms" - }, - { - "name": "swarm_driver average long handling time", - "value": '$average_handling_ms', - "unit": "ms" - } - ]' > swarm_driver_long_handlings.json - - - name: check swarm_driver_long_handlings.json - shell: bash - run: cat swarm_driver_long_handlings.json - - - name: Alert for swarm_driver long handlings - uses: benchmark-action/github-action-benchmark@v1 - with: - tool: "customSmallerIsBetter" - output-file-path: swarm_driver_long_handlings.json - # Where the previous data file is stored - external-data-json-path: ./cache/swarm_driver_long_handlings.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # Comment on the PR - comment-always: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true + # benchmark-cli: + # name: Compare sn_cli benchmarks to main + # # right now only ubuntu, running on multiple systems would require many pushes...\ + # # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing + # # once to the branch.. 
+ # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + + # - uses: dtolnay/rust-toolchain@stable + # with: + # components: rustfmt, clippy + + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + + # ######################## + # ### Setup ### + # ######################## + # - run: cargo install cargo-criterion + + # - name: install ripgrep + # run: sudo apt-get -y install ripgrep + + # - name: Download 95mb file to be uploaded with the safe client + # shell: bash + # run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + + # # As normal user won't care much about initial client startup, + # # but be more alerted on communication speed during transmission. + # # Meanwhile the criterion testing code includes the client startup as well, + # # it will be better to execute bench test with `local`, + # # to make the measurement results reflect speed improvement or regression more accurately. + # - name: Build sn bins + # run: cargo build --release --bin safe --bin safenode --features local + # timeout-minutes: 30 + + # - name: Build faucet bin + # run: cargo build --release --bin faucet --features local --features gifting --no-default-features + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # env: + # SN_LOG: "all" + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ubuntu-latest + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + + # ######################### + # ### Upload large file ### + # ######################### + + # - name: Fund cli wallet + # shell: bash + # run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000 + # env: + # SN_LOG: "all" + + # - name: Start a client instance to compare memory usage + # shell: bash + # run: target/release/safe 
--log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick + # env: + # SN_LOG: "all" + + # - name: Cleanup uploaded_files folder to avoid pollute download benchmark + # shell: bash + # run: rm -rf $CLIENT_DATA_PATH/uploaded_files + + # ########################### + # ### Client Mem Analysis ### + # ########################### + + # - name: Check client memory usage + # shell: bash + # run: | + # client_peak_mem_limit_mb="1024" # mb + # client_avg_mem_limit_mb="512" # mb + + # peak_mem_usage=$( + # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | + # awk -F':' '/"memory_used_mb":/{print $2}' | + # sort -n | + # tail -n 1 + # ) + # echo "Peak memory usage: $peak_mem_usage MB" + # if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then + # echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" + # exit 1 + # fi + + # total_mem=$( + # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | + # awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' + # ) + # num_of_times=$( + # rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "num_of_times: $num_of_times" + # echo "Total memory is: $total_mem" + # average_mem=$(($total_mem/$(($num_of_times)))) + # echo "Average memory is: $average_mem" + + # if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then + # echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" + # exit 1 + # fi + # # Write the client memory usage to a file + # echo '[ + # { + # "name": "client-peak-memory-usage-during-upload", + # "value": '$peak_mem_usage', + # "unit": "MB" + # }, + # { + # "name": "client-average-memory-usage-during-upload", + # "value": '$average_mem', + # "unit": "MB" + # } + # ]' > client_memory_usage.json + + # - name: check 
client_memory_usage.json + # shell: bash + # run: cat client_memory_usage.json + + # - name: Alert for client memory usage + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # name: "Memory Usage of Client during uploading large file" + # tool: "customSmallerIsBetter" + # output-file-path: client_memory_usage.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/client-mem-usage.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # ######################## + # ### Benchmark ### + # ######################## + # - name: Bench `safe` cli + # shell: bash + # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, + # # passes to tee which displays it in the terminal and writes to output.txt + # run: | + # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt + # cat output.txt | rg benchmark-complete | jq -s 'map({ + # name: (.id | split("/"))[-1], + # unit: "MiB/s", + # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) + # })' > files-benchmark.json + # timeout-minutes: 15 + + # - name: Confirming the number of files uploaded and downloaded during the benchmark test + # shell: bash + # run: | + # ls -l $CLIENT_DATA_PATH + # ls -l $CLIENT_DATA_PATH/uploaded_files + # ls -l $CLIENT_DATA_PATH/safe_files + + # - name: Store benchmark result + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # # What benchmark tool the output.txt came from + # tool: "customBiggerIsBetter" + # output-file-path: 
files-benchmark.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/benchmark-data.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # - name: Start a client to carry out download to output the logs + # shell: bash + # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick + + # - name: Start a client to simulate criterion upload + # shell: bash + # run: | + # ls -l target/release + # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick + + # ######################### + # ### Stop Network ### + # ######################### + + # - name: Stop the local network + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_benchmark + # platform: ubuntu-latest + # build: true + + # - name: Upload Faucet folder + # uses: actions/upload-artifact@main + # with: + # name: faucet_folder + # path: /home/runner/.local/share/safe/test_faucet + # continue-on-error: true + # if: always() + + # ######################### + # ### Node Mem Analysis ### + # ######################### + + # # The large file uploaded will increase node's peak mem usage a lot + # - name: Check node memory usage + # shell: bash + # run: | + # node_peak_mem_limit_mb="250" # mb + # peak_mem_usage=$( + # rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | + # awk -F':' '/"memory_used_mb":/{print $2}' | + # sort -n | + # tail -n 1 + # ) + + # echo "Memory usage: $peak_mem_usage MB" + # if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then + # echo "Node memory 
usage exceeded threshold: $peak_mem_usage MB" + # exit 1 + # fi + # # Write the node memory usage to a file + # echo '[ + # { + # "name": "node-memory-usage-through-safe-benchmark", + # "value": '$peak_mem_usage', + # "unit": "MB" + # } + # ]' > node_memory_usage.json + + # - name: check node_memory_usage.json + # shell: bash + # run: cat node_memory_usage.json + + # - name: Alert for node memory usage + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # tool: "customSmallerIsBetter" + # output-file-path: node_memory_usage.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/node-mem-usage.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # Comment on the PR + # comment-always: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # ########################################### + # ### Swarm_driver handling time Analysis ### + # ########################################### + + # - name: Check swarm_driver handling time + # shell: bash + # run: | + # num_of_times=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long cmd handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total cmd long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average cmd long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms)) + # 
total_num_of_times=$(($num_of_times)) + # num_of_times=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long event handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total event long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average event long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms+$total_long_handling)) + # total_num_of_times=$(($num_of_times+$total_num_of_times)) + # average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) + # echo "Total swarm_driver long handling times is: $total_num_of_times" + # echo "Total swarm_driver long handling duration is: $total_long_handling ms" + # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + # total_num_of_times_limit_hits="30000" # hits + # total_long_handling_limit_ms="400000" # ms + # average_handling_limit_ms="20" # ms + # if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then + # echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" + # exit 1 + # fi + # if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then + # echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" + # exit 1 + # fi + # if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then + # echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" + # exit 1 + # fi + + # # Write the node memory usage to a file + # echo '[ + # { + # "name": "swarm_driver long handling times", + # 
"value": '$total_num_of_times', + # "unit": "hits" + # }, + # { + # "name": "swarm_driver long handling total_time", + # "value": '$total_long_handling', + # "unit": "ms" + # }, + # { + # "name": "swarm_driver average long handling time", + # "value": '$average_handling_ms', + # "unit": "ms" + # } + # ]' > swarm_driver_long_handlings.json + + # - name: check swarm_driver_long_handlings.json + # shell: bash + # run: cat swarm_driver_long_handlings.json + + # - name: Alert for swarm_driver long handlings + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # tool: "customSmallerIsBetter" + # output-file-path: swarm_driver_long_handlings.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/swarm_driver_long_handlings.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # Comment on the PR + # comment-always: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true benchmark-cash: name: Compare sn_transfer benchmarks to main diff --git a/.github/workflows/build-release-artifacts.yml b/.github/workflows/build-release-artifacts.yml index b30d4e1803..4bbc2f8f7b 100644 --- a/.github/workflows/build-release-artifacts.yml +++ b/.github/workflows/build-release-artifacts.yml @@ -17,17 +17,6 @@ on: description: Set to build a particular tag type: string -# The key variables also need to be passed to `cross`, which runs in a container and does not -# inherit variables from the parent environment. The `cross` tool is used in the `build` -# job. If any keys are added, the `build-release-artifacts` target in the Justfile must -# also be updated. 
-env: - GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }} - GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }} - FOUNDATION_PK: ${{ secrets.STABLE_FOUNDATION_PK }} - NETWORK_ROYALTIES_PK: ${{ secrets.STABLE_NETWORK_ROYALTIES_PK }} - PAYMENT_FORWARD_PK: ${{ secrets.STABLE_REWARD_FORWARDING_PK }} - jobs: build: name: build diff --git a/.github/workflows/cross-platform.yml b/.github/workflows/cross-platform.yml index d4d9393008..6beeac321d 100644 --- a/.github/workflows/cross-platform.yml +++ b/.github/workflows/cross-platform.yml @@ -16,7 +16,7 @@ jobs: wasm: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Wasm builds + name: wasm32-unknown-unknown builds runs-on: ubuntu-latest steps: @@ -29,10 +29,9 @@ jobs: - name: Install wasm-pack run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - - name: Build client for wasm - # wasm pack doesnt support workspaces - # --dev to avoid a loong optimisation step - run: cd sn_client && wasm-pack build --dev + - name: Build WASM package + # --dev to avoid optimisation + run: wasm-pack build --dev --target=web autonomi timeout-minutes: 30 websocket: diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index cd61f0e165..27a737a7a7 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -15,7 +15,7 @@ permissions: env: CARGO_INCREMENTAL: "0" RUST_BACKTRACE: 1 - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi NODE_DATA_PATH: /home/runner/.local/share/safe/node jobs: @@ -45,43 +45,29 @@ jobs: shell: bash run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - name: Build node and client - run: cargo build --release --features local-discovery --bin safenode --bin safe - timeout-minutes: 30 - - - name: Build faucet bin - run: cargo build --release --bin faucet --features local-discovery 
--features gifting + - name: Build node and cli binaries + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - name: Start a local network uses: maidsafe/sn-local-testnet-action@main - env: - SN_LOG: "all" with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ubuntu-latest build: true - - - name: Create and fund a wallet to pay for files storage - run: | - cargo run --bin faucet --release -- --log-output-dest=data-dir send 1000000 $(cargo run --bin safe --release -- wallet address | tail -n 1) | tail -n 1 > transfer_hex - cargo run --bin safe --release -- wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 10 + sn-log: "all" ######################## ### Benchmark ### ######################## - - name: Bench `safe` cli + - name: Bench `autonomi` cli shell: bash # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # passes to tee which displays it in the terminal and writes to output.txt run: | - cargo criterion --features=local-discovery --message-format=json 2>&1 -p sn_cli | tee -a output.txt + cargo criterion --features=local --message-format=json 2>&1 -p autonomi | tee -a output.txt cat output.txt | rg benchmark-complete | jq -s 'map({ name: (.id | split("/"))[-1], unit: "MiB/s", @@ -107,9 +93,14 @@ jobs: auto-push: true max-items-in-chart: 300 + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Start a client instance to compare memory usage shell: bash - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick + run: cargo run --bin autonomi --release -- --log-output-dest=data-dir file upload the-test-data.zip env: SN_LOG: "all" diff 
--git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index 99f5b93609..55d3790bb5 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -17,506 +17,505 @@ env: RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs -jobs: - memory-check: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Check we're on the right commit - run: git log -1 --oneline - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep - - - name: Build binaries - run: cargo build --release --bin safe --bin safenode - timeout-minutes: 30 - - - name: Build faucet binary with gifting - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 - - - name: Build tests - run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run - timeout-minutes: 30 - - - name: Start a node instance that does not undergo churn - run: | - mkdir -p $BOOTSTRAP_NODE_DATA_PATH - ./target/release/safenode --first \ - --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & - sleep 10 - env: - SN_LOG: "all" - - - name: Set SAFE_PEERS - run: | - safe_peers=$(rg "Local node is listening .+ on .+" $BOOTSTRAP_NODE_DATA_PATH -u | \ - rg '/ip4.*$' -m1 -o) - echo $safe_peers - echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV - - - name: Check SAFE_PEERS was set - shell: bash - run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - - - name: Start a node instance to be restarted - run: | - mkdir -p $RESTART_TEST_NODE_DATA_PATH - ./target/release/safenode \ - --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & - sleep 10 - env: - 
SN_LOG: "all" - - - name: Start a local network - env: - SN_LOG: "all" - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - build: true - faucet-path: target/release/faucet - interval: 2000 - join: true - node-path: target/release/safenode - owner-prefix: node - platform: ubuntu-latest - set-safe-peers: false - - # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet - - name: Check SAFE_PEERS was not changed - shell: bash - run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" - - - name: Create and fund a wallet to pay for files storage - run: | - echo "Obtaining address for use with the faucet..." - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) - echo "Sending tokens to the faucet at $address" - ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt - cat initial_balance_from_faucet.txt - cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex - cat transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 15 - - - name: Move faucet log to the working folder - run: | - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Download 95mb file to be uploaded with the safe client - shell: bash - run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - # The resources file we upload may change, and with it mem consumption. - # Be aware! 
- - name: Start a client to upload files - # -p makes files public - run: | - ls -l - ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p - env: - SN_LOG: "all" - timeout-minutes: 25 - - # this check needs to be after some transfer activity - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $RESTART_TEST_NODE_DATA_PATH - cat $RESTART_TEST_NODE_DATA_PATH/safenode.log - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $BOOTSTRAP_NODE_DATA_PATH - cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log - - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $NODE_DATA_PATH - rg "USING DEFAULT" "$NODE_DATA_PATH" -u - shell: bash - - # Uploading same file using different client shall not incur any payment neither uploads - # Note rg will throw an error directly in case of failed to find a matching pattern. - - name: Start a different client to upload the same file - run: | - pwd - mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - ls -l $SAFE_DATA_PATH - ls -l $SAFE_DATA_PATH/client_first - mkdir $SAFE_DATA_PATH/client - ls -l $SAFE_DATA_PATH - mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - ls -l $CLIENT_DATA_PATH - cp ./the-test-data.zip ./the-test-data_1.zip - ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password - ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt - cat initial_balance_from_faucet_1.txt - cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex - cat transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt - cat 
second_upload.txt - rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats - env: - SN_LOG: "all" - timeout-minutes: 25 - - - name: Stop the restart node - run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) - - - name: Start the restart node again - run: | - ./target/release/safenode \ - --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & - sleep 10 - env: - SN_LOG: "all" - - - name: Assert we've reloaded some chunks - run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --test data_with_churn -- --nocapture - env: - TEST_DURATION_MINS: 5 - TEST_TOTAL_CHURN_CYCLES: 15 - SN_LOG: "all" - timeout-minutes: 30 - - - name: Check current files - run: ls -la - - name: Check safenode file - run: ls /home/runner/work/safe_network/safe_network/target/release - - - name: Check there was no restart issues - run: | - if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then - echo "Restart issues detected" - exit 1 - else - echo "No restart issues detected" - fi - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - env: - SLEEP_BEFORE_VERIFICATION: 300 - timeout-minutes: 10 - - - name: Verify restart of nodes using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" - peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt 
$restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - node_count=$(ls $NODE_DATA_PATH | wc -l) - echo "Node dir count is $node_count" - # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - # if [ $restart_count -lt $node_count ]; then - # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # exit 1 - # fi - - - name: Verify data replication using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of replication - # TODO: make this use an env var, or relate to testnet size - # As the bootstrap_node using separate folder for logging, - # hence the folder input to rg needs to cover that as well. - run: | - sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Sent $sending_list_count replication lists" - received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Received $received_list_count replication lists" - fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Carried out $fetching_attempt_count fetching attempts" - if: always() - - - name: Start a client to download files - run: | - ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - ls -l $CLIENT_DATA_PATH/safe_files - downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) - if [ $downloaded_files -lt 1 ]; then - echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" - exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 10 - - # Download the same files again to ensure files won't get corrupted. 
- - name: Start a client to download the same files again - run: | - ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick - ls -l $CLIENT_DATA_PATH/safe_files - downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) - if [ $downloaded_files -lt 1 ]; then - echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" - exit 1 - fi - file_size1=$(stat -c "%s" ./the-test-data_1.zip) - file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) - if [ $file_size1 != $file_size2 ]; then - echo "The downloaded file has a different size $file_size2 to the original $file_size1." - exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Audit from genesis to collect entire spend DAG and dump to a dot file - run: | - ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt - echo "==============================================================================" - cat spend_dag_and_statistics.txt - env: - SN_LOG: "all" - timeout-minutes: 5 - if: always() - - - name: Ensure discord_ids decrypted - run: | - rg 'node_' ./spend_dag_and_statistics.txt -o - timeout-minutes: 1 - if: always() - - - name: Check nodes running - shell: bash - timeout-minutes: 1 - continue-on-error: true - run: pgrep safenode | wc -l - if: always() - - - name: Wait before verifying reward forwarding - run: sleep 300 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_memcheck - platform: ubuntu-latest - build: true - - - name: Check node memory usage - shell: bash - # The resources file and churning chunk_size we upload may change, and with it mem consumption. 
- # This is set to a value high enough to allow for some variation depending on - # resources and node location in the network, but hopefully low enough to catch - # any wild memory issues - # Any changes to this value should be carefully considered and tested! - # As we have a bootstrap node acting as an access point for churning nodes and client, - # The memory usage here will be significantly higher here than in the benchmark test, - # where we don't have a bootstrap node. - run: | - node_peak_mem_limit_mb="300" # mb - - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - echo "Node memory usage: $peak_mem_usage MB" - - if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then - echo "Node memory usage exceeded threshold: $peak_mem_usage MB" - exit 1 - fi - if: always() - - - name: Check client memory usage - shell: bash - # limits here are lower that benchmark tests as there is less going on. 
- run: | - client_peak_mem_limit_mb="1024" # mb - client_avg_mem_limit_mb="512" # mb - - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - echo "Peak memory usage: $peak_mem_usage MB" - if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then - echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" - exit 1 - fi - - total_mem=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' - ) - num_of_times=$( - rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "num_of_times: $num_of_times" - echo "Total memory is: $total_mem" - average_mem=$(($total_mem/$(($num_of_times)))) - echo "Average memory is: $average_mem" - - if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then - echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" - exit 1 - fi - - - name: Check node swarm_driver handling statistics - shell: bash - # With the latest improvements, swarm_driver will be in high chance - # has no super long handling (longer than 1s). - # As the `rg` cmd will fail the shell directly if no entry find, - # hence not covering it. 
- # Be aware that if do need to looking for handlings longer than second, it shall be: - # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats - run: | - num_of_times=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long cmd handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total cmd long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average cmd long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms)) - total_num_of_times=$(($num_of_times)) - num_of_times=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long event handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total event long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average event long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms+$total_long_handling)) - total_num_of_times=$(($num_of_times+$total_num_of_times)) - average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) - echo "Total swarm_driver long handling times is: $total_num_of_times" - echo "Total swarm_driver long handling duration is: $total_long_handling ms" - echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - - - name: Verify reward forwarding 
using rg - shell: bash - timeout-minutes: 1 - run: | - min_reward_forwarding_times="100" - reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Carried out $reward_forwarding_count reward forwardings" - if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then - echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" - exit 1 - fi - if: always() - - - name: Upload payment wallet initialization log - uses: actions/upload-artifact@main - with: - name: payment_wallet_initialization_log - path: initial_balance_from_faucet.txt - continue-on-error: true - if: always() - - - name: Move faucet log to the working folder - run: | - echo "current folder is:" - pwd - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $FAUCET_LOG_PATH/*.log ./faucet_log.log - env: - SN_LOG: "all" - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Move bootstrap_node log to the working directory - run: | - ls -l $BOOTSTRAP_NODE_DATA_PATH - mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Upload faucet log - uses: actions/upload-artifact@main - with: - name: memory_check_faucet_log - path: faucet_log.log - continue-on-error: true - if: always() - - - name: Upload bootstrap_node log - uses: actions/upload-artifact@main - with: - name: memory_check_bootstrap_node_log - path: bootstrap_node.log - continue-on-error: true - if: always() - - - name: Upload spend DAG and statistics - uses: actions/upload-artifact@main - with: - name: memory_check_spend_dag_and_statistics - path: spend_dag_and_statistics.txt - continue-on-error: true - if: always() +# jobs: +# memory-check: +# runs-on: ubuntu-latest +# 
steps: +# - name: Checkout code +# uses: actions/checkout@v4 + +# - name: Check we're on the right commit +# run: git log -1 --oneline + +# - name: Install Rust +# uses: dtolnay/rust-toolchain@stable + +# - uses: Swatinem/rust-cache@v2 +# continue-on-error: true + +# - name: install ripgrep +# shell: bash +# run: sudo apt-get install -y ripgrep + +# - name: Build binaries +# run: cargo build --release --bin safe --bin safenode +# timeout-minutes: 30 + +# - name: Build faucet binary with gifting +# run: cargo build --release --bin faucet --features gifting +# timeout-minutes: 30 + +# - name: Build tests +# run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run +# timeout-minutes: 30 + +# - name: Start a node instance that does not undergo churn +# run: | +# mkdir -p $BOOTSTRAP_NODE_DATA_PATH +# ./target/release/safenode --first \ +# --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & +# sleep 10 +# env: +# SN_LOG: "all" + +# - name: Set SAFE_PEERS +# run: | +# safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ +# rg '/ip4.*$' -m1 -o | rg '"' -r '') +# echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV + +# - name: Check SAFE_PEERS was set +# shell: bash +# run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + +# - name: Start a node instance to be restarted +# run: | +# mkdir -p $RESTART_TEST_NODE_DATA_PATH +# ./target/release/safenode \ +# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & +# sleep 10 +# env: +# SN_LOG: "all" + +# - name: Start a local network +# env: +# SN_LOG: "all" +# uses: maidsafe/sn-local-testnet-action@main +# with: +# action: start +# build: true +# faucet-path: target/release/faucet +# interval: 2000 +# join: true +# node-path: target/release/safenode +# owner-prefix: node +# platform: ubuntu-latest +# set-safe-peers: false + +# # In this 
case we did *not* want SAFE_PEERS to be set to another value by starting the testnet +# - name: Check SAFE_PEERS was not changed +# shell: bash +# run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" + +# - name: Create and fund a wallet to pay for files storage +# run: | +# echo "Obtaining address for use with the faucet..." +# ./target/release/safe --log-output-dest=data-dir wallet create --no-password +# address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) +# echo "Sending tokens to the faucet at $address" +# ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt +# cat initial_balance_from_faucet.txt +# cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex +# cat transfer_hex +# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex +# env: +# SN_LOG: "all" +# timeout-minutes: 15 + +# - name: Move faucet log to the working folder +# run: | +# echo "SAFE_DATA_PATH has: " +# ls -l $SAFE_DATA_PATH +# echo "test_faucet foder has: " +# ls -l $SAFE_DATA_PATH/test_faucet +# echo "logs folder has: " +# ls -l $SAFE_DATA_PATH/test_faucet/logs +# mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Download 95mb file to be uploaded with the safe client +# shell: bash +# run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + +# # The resources file we upload may change, and with it mem consumption. +# # Be aware! 
+# - name: Start a client to upload files +# # -p makes files public +# run: | +# ls -l +# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p +# env: +# SN_LOG: "all" +# timeout-minutes: 25 + +# # this check needs to be after some transfer activity +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $RESTART_TEST_NODE_DATA_PATH +# cat $RESTART_TEST_NODE_DATA_PATH/safenode.log +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $BOOTSTRAP_NODE_DATA_PATH +# cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log + +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $NODE_DATA_PATH +# rg "USING DEFAULT" "$NODE_DATA_PATH" -u +# shell: bash + +# # Uploading same file using different client shall not incur any payment neither uploads +# # Note rg will throw an error directly in case of failed to find a matching pattern. 
+# - name: Start a different client to upload the same file +# run: | +# pwd +# mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first +# ls -l $SAFE_DATA_PATH +# ls -l $SAFE_DATA_PATH/client_first +# mkdir $SAFE_DATA_PATH/client +# ls -l $SAFE_DATA_PATH +# mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs +# ls -l $CLIENT_DATA_PATH +# cp ./the-test-data.zip ./the-test-data_1.zip +# ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password +# ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt +# cat initial_balance_from_faucet_1.txt +# cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex +# cat transfer_hex +# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex +# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt +# cat second_upload.txt +# rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats +# env: +# SN_LOG: "all" +# timeout-minutes: 25 + +# - name: Stop the restart node +# run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) + +# - name: Start the restart node again +# run: | +# ./target/release/safenode \ +# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & +# sleep 10 +# env: +# SN_LOG: "all" + +# - name: Assert we've reloaded some chunks +# run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH + +# - name: Chunks data integrity during nodes churn +# run: cargo test --release -p sn_node --test data_with_churn -- --nocapture +# env: +# TEST_DURATION_MINS: 5 +# TEST_TOTAL_CHURN_CYCLES: 15 +# SN_LOG: "all" +# timeout-minutes: 30 + +# - name: Check current files +# run: ls -la +# - name: Check safenode file +# run: ls /home/runner/work/safe_network/safe_network/target/release + 
+# - name: Check there was no restart issues +# run: | +# if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then +# echo "Restart issues detected" +# exit 1 +# else +# echo "No restart issues detected" +# fi + +# - name: Verify the routing tables of the nodes +# run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture +# env: +# SLEEP_BEFORE_VERIFICATION: 300 +# timeout-minutes: 10 + +# - name: Verify restart of nodes using rg +# shell: bash +# timeout-minutes: 1 +# # get the counts, then the specific line, and then the digit count only +# # then check we have an expected level of restarts +# # TODO: make this use an env var, or relate to testnet size +# run: | +# restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Restart $restart_count nodes" +# peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "PeerRemovedFromRoutingTable $peer_removed times" +# if [ $peer_removed -lt $restart_count ]; then +# echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" +# exit 1 +# fi +# node_count=$(ls $NODE_DATA_PATH | wc -l) +# echo "Node dir count is $node_count" +# # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here +# # if [ $restart_count -lt $node_count ]; then +# # echo "Restart count of: $restart_count is less than the node count of: $node_count" +# # exit 1 +# # fi + +# - name: Verify data replication using rg +# shell: bash +# timeout-minutes: 1 +# # get the counts, then the specific line, and then the digit count only +# # then check we have an expected level of replication +# # TODO: make this use an env var, or relate to testnet size +# # As the bootstrap_node using separate folder for logging, +# # hence the folder input to rg needs to cover that as well. 
+# run: | +# sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Sent $sending_list_count replication lists" +# received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Received $received_list_count replication lists" +# fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Carried out $fetching_attempt_count fetching attempts" +# if: always() + +# - name: Start a client to download files +# run: | +# ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick +# ls -l $CLIENT_DATA_PATH/safe_files +# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) +# if [ $downloaded_files -lt 1 ]; then +# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" +# exit 1 +# fi +# env: +# SN_LOG: "all" +# timeout-minutes: 10 + +# # Download the same files again to ensure files won't get corrupted. +# - name: Start a client to download the same files again +# run: | +# ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick +# ls -l $CLIENT_DATA_PATH/safe_files +# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) +# if [ $downloaded_files -lt 1 ]; then +# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" +# exit 1 +# fi +# file_size1=$(stat -c "%s" ./the-test-data_1.zip) +# file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) +# if [ $file_size1 != $file_size2 ]; then +# echo "The downloaded file has a different size $file_size2 to the original $file_size1." 
+# exit 1 +# fi +# env: +# SN_LOG: "all" +# timeout-minutes: 10 + +# - name: Audit from genesis to collect entire spend DAG and dump to a dot file +# run: | +# ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt +# echo "==============================================================================" +# cat spend_dag_and_statistics.txt +# env: +# SN_LOG: "all" +# timeout-minutes: 5 +# if: always() + +# - name: Ensure discord_ids decrypted +# run: | +# rg 'node_' ./spend_dag_and_statistics.txt -o +# timeout-minutes: 1 +# if: always() + +# - name: Check nodes running +# shell: bash +# timeout-minutes: 1 +# continue-on-error: true +# run: pgrep safenode | wc -l +# if: always() + +# - name: Wait before verifying reward forwarding +# run: sleep 300 + +# - name: Stop the local network and upload logs +# if: always() +# uses: maidsafe/sn-local-testnet-action@main +# with: +# action: stop +# log_file_prefix: safe_test_logs_memcheck +# platform: ubuntu-latest +# build: true + +# - name: Check node memory usage +# shell: bash +# # The resources file and churning chunk_size we upload may change, and with it mem consumption. +# # This is set to a value high enough to allow for some variation depending on +# # resources and node location in the network, but hopefully low enough to catch +# # any wild memory issues +# # Any changes to this value should be carefully considered and tested! +# # As we have a bootstrap node acting as an access point for churning nodes and client, +# # The memory usage here will be significantly higher here than in the benchmark test, +# # where we don't have a bootstrap node. 
+# run: | +# node_peak_mem_limit_mb="300" # mb + +# peak_mem_usage=$( +# rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | +# awk -F':' '/"memory_used_mb":/{print $2}' | +# sort -n | +# tail -n 1 +# ) +# echo "Node memory usage: $peak_mem_usage MB" + +# if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then +# echo "Node memory usage exceeded threshold: $peak_mem_usage MB" +# exit 1 +# fi +# if: always() + +# - name: Check client memory usage +# shell: bash +# # limits here are lower that benchmark tests as there is less going on. +# run: | +# client_peak_mem_limit_mb="1024" # mb +# client_avg_mem_limit_mb="512" # mb + +# peak_mem_usage=$( +# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | +# awk -F':' '/"memory_used_mb":/{print $2}' | +# sort -n | +# tail -n 1 +# ) +# echo "Peak memory usage: $peak_mem_usage MB" +# if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then +# echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" +# exit 1 +# fi + +# total_mem=$( +# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | +# awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' +# ) +# num_of_times=$( +# rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | +# rg "(\d+) matches" | +# rg "\d+" -o +# ) +# echo "num_of_times: $num_of_times" +# echo "Total memory is: $total_mem" +# average_mem=$(($total_mem/$(($num_of_times)))) +# echo "Average memory is: $average_mem" + +# if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then +# echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" +# exit 1 +# fi + +# - name: Check node swarm_driver handling statistics +# shell: bash +# # With the latest improvements, swarm_driver will be in high chance +# # has no super long handling (longer than 1s). 
+# # As the `rg` cmd will fail the shell directly if no entry find, +# # hence not covering it. +# # Be aware that if do need to looking for handlings longer than second, it shall be: +# # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats +# run: | +# num_of_times=$( +# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | +# rg "(\d+) matches" | +# rg "\d+" -o +# ) +# echo "Number of long cmd handling times: $num_of_times" +# total_long_handling_ms=$( +# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | +# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +# ) +# echo "Total cmd long handling time is: $total_long_handling_ms ms" +# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +# echo "Average cmd long handling time is: $average_handling_ms ms" +# total_long_handling=$(($total_long_handling_ms)) +# total_num_of_times=$(($num_of_times)) +# num_of_times=$( +# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | +# rg "(\d+) matches" | +# rg "\d+" -o +# ) +# echo "Number of long event handling times: $num_of_times" +# total_long_handling_ms=$( +# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | +# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +# ) +# echo "Total event long handling time is: $total_long_handling_ms ms" +# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +# echo "Average event long handling time is: $average_handling_ms ms" +# total_long_handling=$(($total_long_handling_ms+$total_long_handling)) +# total_num_of_times=$(($num_of_times+$total_num_of_times)) +# average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) +# echo "Total swarm_driver long handling times is: $total_num_of_times" +# echo "Total swarm_driver long handling duration is: 
$total_long_handling ms" +# echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + +# - name: Verify reward forwarding using rg +# shell: bash +# timeout-minutes: 1 +# run: | +# min_reward_forwarding_times="100" +# reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Carried out $reward_forwarding_count reward forwardings" +# if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then +# echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" +# exit 1 +# fi +# if: always() + +# - name: Upload payment wallet initialization log +# uses: actions/upload-artifact@main +# with: +# name: payment_wallet_initialization_log +# path: initial_balance_from_faucet.txt +# continue-on-error: true +# if: always() + +# - name: Move faucet log to the working folder +# run: | +# echo "current folder is:" +# pwd +# echo "SAFE_DATA_PATH has: " +# ls -l $SAFE_DATA_PATH +# echo "test_faucet folder has: " +# ls -l $SAFE_DATA_PATH/test_faucet +# echo "logs folder has: " +# ls -l $SAFE_DATA_PATH/test_faucet/logs +# mv $FAUCET_LOG_PATH/*.log ./faucet_log.log +# env: +# SN_LOG: "all" +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Move bootstrap_node log to the working directory +# run: | +# ls -l $BOOTSTRAP_NODE_DATA_PATH +# mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Upload faucet log +# uses: actions/upload-artifact@main +# with: +# name: memory_check_faucet_log +# path: faucet_log.log +# continue-on-error: true +# if: always() + +# - name: Upload bootstrap_node log +# uses: actions/upload-artifact@main +# with: +# name: memory_check_bootstrap_node_log +# path: bootstrap_node.log +# continue-on-error: true +# if: always() + +# - name: Upload spend DAG and statistics +# uses: 
actions/upload-artifact@main +# with: +# name: memory_check_spend_dag_and_statistics +# path: spend_dag_and_statistics.txt +# continue-on-error: true +# if: always() diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index b95a0a3488..98ee999b06 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -5,7 +5,7 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run checks should show on master and enable this clear test/passing history merge_group: - branches: [main, alpha*, beta*, rc*] + branches: [main, evm-dev] pull_request: branches: ["*"] @@ -73,9 +73,9 @@ jobs: # See https://doc.rust-lang.org/rustdoc/lints.html for lints that are 'warning' by default. run: RUSTDOCFLAGS="--deny=warnings" cargo doc --no-deps - - name: Check local-discovery is not a default feature + - name: Check local is not a default feature shell: bash - run: if [[ ! $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[].features.default[]? | select(. == "local-discovery")') ]]; then echo "local-discovery is not a default feature in any package."; else echo "local-discovery is a default feature in at least one package." && exit 1; fi + run: if [[ ! $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[].features.default[]? | select(. == "local")') ]]; then echo "local is not a default feature in any package."; else echo "local is a default feature in at least one package." 
&& exit 1; fi - name: Clean out the target directory run: cargo clean @@ -110,24 +110,13 @@ jobs: - uses: Swatinem/rust-cache@v2 - - name: Run CLI tests - timeout-minutes: 25 - run: cargo test --release --package sn_cli -- --skip test_acc_packet_ - - # We do not run client `--tests` here as they can require a network - - name: Run client tests - timeout-minutes: 25 - run: | - cargo test --release --package sn_client --lib - cargo test --release --package sn_client --doc - - name: Run node tests timeout-minutes: 25 run: cargo test --release --package sn_node --lib - name: Run network tests timeout-minutes: 25 - run: cargo test --release --package sn_networking + run: cargo test --release --package sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 @@ -162,7 +151,7 @@ jobs: - os: windows-latest safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - os: macos-latest - safe_path: /Users/runner/Library/Application Support/safe + safe_path: /Users/runner/Library/Application\ Support/safe steps: - uses: actions/checkout@v4 @@ -171,166 +160,182 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 - - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - name: Start a local network uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - - name: Check SAFE_PEERS was set + - name: Check if SAFE_PEERS and EVM_NETWORK are set shell: bash run: | if [[ -z "$SAFE_PEERS" ]]; then echo "The SAFE_PEERS variable has not been set" exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 else echo "SAFE_PEERS has been set 
to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" fi # only these unit tests require a network, the rest are run above - - name: Run sn_client --tests - run: cargo test --package sn_client --release --tests + - name: Run autonomi --tests + run: cargo test --package autonomi --tests -- --nocapture env: - SN_LOG: "all" + SN_LOG: "v" # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 15 - - name: Create and fund a wallet to pay for files storage - run: | - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 5 + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + if: matrix.os != 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Set secret key for Windows + if: matrix.os == 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - - name: Start a client to upload cost estimate - run: ./target/release/safe --log-output-dest=data-dir files estimate "./resources" + - name: Get file cost + run: ./target/release/autonomi --log-output-dest=data-dir file cost "./resources" env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 15 - - name: Start a client to upload files - run: ./target/release/safe --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + - name: File upload + run: 
./target/release/autonomi --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 15 - - name: Start a client to download files - run: ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick + - name: parse address (unix) + if: matrix.os != 'windows-latest' + run: | + UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse address (win) + if: matrix.os == 'windows-latest' + run: | + $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: File Download + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 5 + + - name: Generate register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key - # Client FoldersApi tests against local network - - name: Client FoldersApi tests against local network - run: cargo test --release --package sn_client --test folders_api + - name: Create register (writeable by owner) + run: ./target/release/autonomi --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 10 - # CLI Acc-Packet files and folders tests against local network - - name: CLI Acc-Packet files and folders tests - run: cargo test --release -p sn_cli test_acc_packet -- --nocapture + - name: parse register address (unix) + if: matrix.os != 'windows-latest' + run: | + REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse register 
address (win) + if: matrix.os == 'windows-latest' + run: | + $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: Get register + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: - SN_LOG: "all" - timeout-minutes: 10 + SN_LOG: "v" + timeout-minutes: 5 - - name: Start a client to create a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register create -n baobao + - name: Edit register + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 10 - - name: Start a client to get a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register get -n baobao + - name: Get register (after edit) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 5 - - name: Start a client to edit a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register edit -n baobao wood + - name: Create Public Register (writeable by anyone) + run: ./target/release/autonomi --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 env: - SN_LOG: "all" - timeout-minutes: 10 - # - # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) - # - - name: Start a client to create a register writable by anyone - id: register-address + SN_LOG: "v" + timeout-minutes: 5 + + - name: parse public register address (unix) if: matrix.os != 'windows-latest' - run: echo "$(./target/release/safe --log-output-dest=data-dir register create 
-n trycatch -p | rg REGISTER_ADDRESS )" >> $GITHUB_OUTPUT - env: - SN_LOG: "all" - timeout-minutes: 10 + run: | + PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output) + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash - - name: Start a client to create a register writable by anyone - id: register-address-windows + - name: parse public register address (win) if: matrix.os == 'windows-latest' - run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $ENV:GITHUB_OUTPUT - env: - SN_LOG: "all" - timeout-minutes: 10 + run: | + $PUBLIC_REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - - name: Start a client to get a register writable by anyone (current client is the owner) - run: ./target/release/safe --log-output-dest=data-dir register get -n trycatch + - name: Get Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 5 - - name: Start a client to edit a register writable by anyone (current client is the owner) - run: ./target/release/safe --log-output-dest=data-dir register edit -n trycatch wood + - name: Edit Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 10 - - name: Delete client subdir to generate new client + - name: Delete current register signing key shell: bash run: rm -rf ${{ matrix.safe_path }}/client - # - # Next four steps are same with a slight difference in the which output step 
they read from - # - - name: Start a client to get a register writable by anyone (new client is not the owner) - if: matrix.os != 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address.outputs.REGISTER_ADDRESS }} + + - name: Generate new register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key + + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - - name: Start a client to edit a register writable by anyone (new client is not the owner) - if: matrix.os != 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address.outputs.REGISTER_ADDRESS }} water + - name: Edit Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 10 - - name: Start a client to get a register writable by anyone (new client is not the owner) - if: matrix.os == 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - - name: Start a client to edit a register writable by anyone (new client is not the owner) - if: matrix.os == 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} water - env: - SN_LOG: "all" - timeout-minutes: 10 - - name: Stop the local network and upload logs if: always() uses: 
maidsafe/sn-local-testnet-action@main @@ -339,84 +344,84 @@ jobs: log_file_prefix: safe_test_logs_e2e platform: ${{ matrix.os }} - spend_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 + # spend_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: spend tests against network + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest, windows-latest, macos-latest] + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - uses: Swatinem/rust-cache@v2 - - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode - timeout-minutes: 30 + # - name: Build binaries + # run: cargo build --release --features=local --bin safenode + # timeout-minutes: 30 - - name: Build faucet binary - run: cargo build --release --bin faucet --features="local-discovery,gifting" - timeout-minutes: 30 + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features="local,gifting" + # timeout-minutes: 30 - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - name: execute the sequential transfers tests - run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 + # - name: execute the sequential transfers tests + # run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 25 - - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 + # - name: execute the storage payment tests + # run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 - - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 + # - name: execute the double spend tests + # run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 25 - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend - platform: ${{ matrix.os }} + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_spend + # platform: ${{ matrix.os }} # # runs with increased node count # spend_simulation: @@ -435,15 +440,15 @@ jobs: # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features=local-discovery --bin safenode + # run: cargo build --release --features=local --bin safenode # timeout-minutes: 30 # - name: Build faucet binary - # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # run: cargo build --release --bin faucet --features="local,gifting" # timeout-minutes: 30 # - name: Build testing executable - # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + # run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run # env: # # only set the target dir for windows to bypass the linker issue. # # happens if we build the node manager via testnet action @@ -472,7 +477,7 @@ jobs: # fi # - name: execute the spend simulation - # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture # env: # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} # timeout-minutes: 25 @@ -485,139 +490,138 @@ jobs: # log_file_prefix: safe_test_logs_spend_simulation # platform: ${{ matrix.os }} - token_distribution_test: + # token_distribution_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: token distribution test + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest, windows-latest, macos-latest] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features=local,distribution --bin safenode + # timeout-minutes: 35 + + # - name: Build faucet binary + # run: cargo build --release --features=local,distribution,gifting --bin faucet + # timeout-minutes: 35 + + # - name: Build testing executable + # run: cargo test --release --features=local,distribution --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 35 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: execute token_distribution tests + # run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 25 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_token_distribution + # platform: ${{ matrix.os }} + + churn: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: token distribution test + name: Network churning tests runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + include: + - os: ubuntu-latest + node_data_path: /home/runner/.local/share/safe/node + safe_path: /home/runner/.local/share/safe + - os: windows-latest + node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + node_data_path: /Users/runner/Library/Application Support/safe/node + safe_path: /Users/runner/Library/Application Support/safe steps: - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features=local-discovery,distribution --bin safenode - timeout-minutes: 35 - - - name: Build faucet binary - run: cargo build --release --features=local-discovery,distribution,gifting --bin faucet - timeout-minutes: 35 + run: cargo build --release --features local --bin safenode + timeout-minutes: 30 - - name: Build testing executable - run: cargo test --release --features=local-discovery,distribution --no-run + - name: Build churn tests + run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 35 + timeout-minutes: 30 - name: Start a local network uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - - name: Check SAFE_PEERS was set + - name: Check if SAFE_PEERS and EVM_NETWORK are set shell: bash run: | if [[ -z "$SAFE_PEERS" ]]; then echo "The SAFE_PEERS variable has not been set" exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 else echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" fi - - name: execute token_distribution tests - run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 + - name: Chunks data integrity during nodes churn + run: cargo test --release -p sn_node --features=local --test data_with_churn -- --nocapture env: + TEST_DURATION_MINS: 5 + TEST_TOTAL_CHURN_CYCLES: 15 SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_token_distribution - platform: ${{ matrix.os }} - - churn: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Network churning tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - node_data_path: /home/runner/.local/share/safe/node - safe_path: /home/runner/.local/share/safe - - os: windows-latest - node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - - os: macos-latest - node_data_path: /Users/runner/Library/Application Support/safe/node - safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode - timeout-minutes: 30 - - - name: Build faucet binaries - run: cargo build --release --features="local-discovery,gifting" --bin faucet - timeout-minutes: 30 - - - name: Build churn tests - run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture - env: - TEST_DURATION_MINS: 5 - TEST_TOTAL_CHURN_CYCLES: 15 - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 + timeout-minutes: 30 - name: Stop the local network and upload logs if: always() @@ -705,15 +709,11 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode - timeout-minutes: 30 - - - name: Build fuacet binary - run: cargo build --release --features="local-discovery,gifting" --bin faucet + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build data location and routing table tests - run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + run: cargo test --release -p sn_node --features=local --test verify_data_location --test verify_routing_table --no-run env: # only set the target dir for windows to bypass the linker issue. 
# happens if we build the node manager via testnet action @@ -724,30 +724,33 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - - name: Check SAFE_PEERS was set + - name: Check if SAFE_PEERS and EVM_NETWORK are set shell: bash run: | if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 else - echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" fi - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture env: CHURN_COUNT: 6 SN_LOG: "all" @@ -755,7 +758,7 @@ jobs: timeout-minutes: 25 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 5 @@ -800,525 +803,525 @@ jobs: exit 1 fi - faucet_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Faucet test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # faucet_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Faucet test + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep + # - name: install ripgrep + # shell: bash + # run: sudo apt-get install -y ripgrep - - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 + # - name: Build binaries + # run: cargo build --release --bin safenode --bin safe + # timeout-minutes: 30 - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features gifting + # timeout-minutes: 30 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ubuntu-latest - build: true + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ubuntu-latest + # build: true - - name: Check we're _not_ warned about using default genesis - run: | - if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then - exit 1 - fi - shell: bash + # - name: Check we're _not_ warned about using default genesis + # run: | + # if rg "USING DEFAULT" "${{ matrix.safe_path 
}}"/*/*/logs; then + # exit 1 + # fi + # shell: bash - - name: Move built binaries and clear out target dir - shell: bash - run: | - mv target/release/faucet ~/faucet - mv target/release/safe ~/safe - rm -rf target + # - name: Move built binaries and clear out target dir + # shell: bash + # run: | + # mv target/release/faucet ~/faucet + # mv target/release/safe ~/safe + # rm -rf target - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - name: Create and fund a wallet first time - run: | - ~/safe --log-output-dest=data-dir wallet create --no-password - ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - echo "----------" - cat first.txt - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Move faucet log to the working folder - run: | - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $SAFE_DATA_PATH/test_faucet/logs/faucet.log ./faucet_log.log - env: - SN_LOG: "all" - SAFE_DATA_PATH: /home/runner/.local/share/safe - continue-on-error: true - if: always() - timeout-minutes: 1 + # - name: Move faucet 
log to the working folder + # run: | + # echo "SAFE_DATA_PATH has: " + # ls -l $SAFE_DATA_PATH + # echo "test_faucet foder has: " + # ls -l $SAFE_DATA_PATH/test_faucet + # echo "logs folder has: " + # ls -l $SAFE_DATA_PATH/test_faucet/logs + # mv $SAFE_DATA_PATH/test_faucet/logs/faucet.log ./faucet_log.log + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # continue-on-error: true + # if: always() + # timeout-minutes: 1 - - name: Upload faucet log - uses: actions/upload-artifact@main - with: - name: faucet_test_first_faucet_log - path: faucet_log.log - continue-on-error: true - if: always() + # - name: Upload faucet log + # uses: actions/upload-artifact@main + # with: + # name: faucet_test_first_faucet_log + # path: faucet_log.log + # continue-on-error: true + # if: always() - - name: Create and fund a wallet second time - run: | - ls -l /home/runner/.local/share - ls -l /home/runner/.local/share/safe - rm -rf /home/runner/.local/share/safe/test_faucet - rm -rf /home/runner/.local/share/safe/test_genesis - rm -rf /home/runner/.local/share/safe/client - ~/safe --log-output-dest=data-dir wallet create --no-password - ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - echo "----------" - cat second.txt - if grep "genesis is already spent" second.txt; then - echo "Duplicated faucet rejected" - else - echo "Duplicated faucet not rejected!" 
- exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Create and fund a wallet second time + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt + # echo "----------" + # cat second.txt + # if grep "genesis is already spent" second.txt; then + # echo "Duplicated faucet rejected" + # else + # echo "Duplicated faucet not rejected!" + # exit 1 + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Create and fund a wallet with different keypair - run: | - ls -l /home/runner/.local/share - ls -l /home/runner/.local/share/safe - rm -rf /home/runner/.local/share/safe/test_faucet - rm -rf /home/runner/.local/share/safe/test_genesis - rm -rf /home/runner/.local/share/safe/client - ~/safe --log-output-dest=data-dir wallet create --no-password - if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - echo "Faucet with different genesis key not rejected!" 
- exit 1 - else - echo "Faucet with different genesis key rejected" - fi - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Create and fund a wallet with different keypair + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # echo "Faucet with different genesis key not rejected!" + # exit 1 + # else + # echo "Faucet with different genesis key rejected" + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Build faucet binary again without the gifting feature - run: cargo build --release --bin faucet - timeout-minutes: 30 + # - name: Build faucet binary again without the gifting feature + # run: cargo build --release --bin faucet + # timeout-minutes: 30 - - name: Start up a faucet in server mode - run: | - ls -l /home/runner/.local/share - ls -l /home/runner/.local/share/safe - rm -rf /home/runner/.local/share/safe/test_faucet - rm -rf /home/runner/.local/share/safe/test_genesis - rm -rf /home/runner/.local/share/safe/client - target/release/faucet server & - sleep 60 - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Start up a faucet in server mode + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # target/release/faucet server & + # sleep 60 + # env: + # SN_LOG: "all" + # 
timeout-minutes: 5 - - name: check there is no upload happens - shell: bash - run: | - if grep -r "NanoTokens(10) }, Output" $NODE_DATA_PATH - then - echo "We find ongoing upload !" - exit 1 - fi - env: - NODE_DATA_PATH: /home/runner/.local/share/safe/node - timeout-minutes: 1 + # - name: check there is no upload happens + # shell: bash + # run: | + # if grep -r "NanoTokens(10) }, Output" $NODE_DATA_PATH + # then + # echo "We find ongoing upload !" + # exit 1 + # fi + # env: + # NODE_DATA_PATH: /home/runner/.local/share/safe/node + # timeout-minutes: 1 - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - platform: ubuntu-latest - log_file_prefix: safe_test_logs_faucet + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # platform: ubuntu-latest + # log_file_prefix: safe_test_logs_faucet - large_file_upload_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Large file upload - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # large_file_upload_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Large file upload + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep + # - name: install ripgrep + # shell: bash + # run: sudo apt-get install -y ripgrep - - name: Check the available space - run: | - df - echo "Home dir:" - du -sh /home/runner/ - echo "Home subdirs:" - du -sh /home/runner/*/ - echo "PWD:" - du -sh . 
- echo "PWD subdirs:" - du -sh */ - - - name: Download material, 1.1G - shell: bash - run: | - wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso - ls -l + # - name: Check the available space + # run: | + # df + # echo "Home dir:" + # du -sh /home/runner/ + # echo "Home subdirs:" + # du -sh /home/runner/*/ + # echo "PWD:" + # du -sh . + # echo "PWD subdirs:" + # du -sh */ + + # - name: Download material, 1.1G + # shell: bash + # run: | + # wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso + # ls -l - - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 + # - name: Build binaries + # run: cargo build --release --bin safenode --bin safe + # timeout-minutes: 30 - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features gifting + # timeout-minutes: 30 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ubuntu-latest - build: true + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ubuntu-latest + # build: true - - name: Check we're _not_ warned about using default genesis - run: | - if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then - exit 1 - fi - shell: bash + # - name: Check we're _not_ warned about using default genesis + # run: | + # if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then + # exit 1 + # fi + # shell: bash - # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here. - # Might be related to additional deps used in the codebase. 
- - name: Move built binaries and clear out target dir - shell: bash - run: | - mv target/release/faucet ~/faucet - mv target/release/safe ~/safe - rm -rf target + # # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here. + # # Might be related to additional deps used in the codebase. + # - name: Move built binaries and clear out target dir + # shell: bash + # run: | + # mv target/release/faucet ~/faucet + # mv target/release/safe ~/safe + # rm -rf target - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - name: Check the available space post download - run: | - df - echo "Home dir:" - du -sh /home/runner/ - echo "Home subdirs:" - du -sh /home/runner/*/ - echo "PWD:" - du -sh . - echo "PWD subdirs:" - du -sh */ - - - name: Create and fund a wallet to pay for files storage - run: | - ~/safe --log-output-dest=data-dir wallet create --no-password - ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Check the available space post download + # run: | + # df + # echo "Home dir:" + # du -sh /home/runner/ + # echo "Home subdirs:" + # du -sh /home/runner/*/ + # echo "PWD:" + # du -sh . 
+ # echo "PWD subdirs:" + # du -sh */ + + # - name: Create and fund a wallet to pay for files storage + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Start a client to upload - run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 30 + # - name: Start a client to upload + # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 30 - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - platform: ubuntu-latest - log_file_prefix: safe_test_logs_large_file_upload - build: true + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # platform: ubuntu-latest + # log_file_prefix: safe_test_logs_large_file_upload + # build: true - - name: check there is no failed replication fetch - shell: bash - run: | - if grep -r "failed to fetch" $NODE_DATA_PATH - then - echo "We find failed replication fetch" - exit 1 - fi - env: - NODE_DATA_PATH: /home/runner/.local/share/safe/node - timeout-minutes: 1 + # - name: check there is no failed replication fetch + # shell: bash + # run: | + # if grep -r "failed to fetch" $NODE_DATA_PATH + # then + # echo "We find failed replication fetch" + # exit 1 + # fi + # env: + # NODE_DATA_PATH: /home/runner/.local/share/safe/node + # timeout-minutes: 1 - - name: Check the home dir leftover space - run: | - df - du -sh /home/runner/ + # - name: Check the home dir leftover space + # run: 
| + # df + # du -sh /home/runner/ - - name: Confirm the wallet files (cash_notes, confirmed_spends) - run: | - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes -l - ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l - ls $CLIENT_DATA_PATH/logs -l - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 1 + # - name: Confirm the wallet files (cash_notes, confirmed_spends) + # run: | + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l + # ls $CLIENT_DATA_PATH/logs -l + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 1 - replication_bench_with_heavy_upload: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Replication bench with heavy upload - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # replication_bench_with_heavy_upload: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Replication bench with heavy upload + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep + # - name: install ripgrep + # shell: bash + # run: sudo apt-get install -y ripgrep - - name: Download materials to create two 300MB test_files to be uploaded by client - shell: bash - run: | - mkdir test_data_1 - cd test_data_1 - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz - wget 
https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode_rpc_client-qiWithListeners-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/faucet-qilesssubs-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safe-qilesssubs-x86_64.tar.gz - ls -l - cd .. - tar -cvzf test_data_1.tar.gz test_data_1 - mkdir test_data_2 - cd test_data_2 - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode-qilesssubs-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode_rpc_client-qilesssubs-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/faucet-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safe-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode-DebugMem-x86_64.tar.gz - ls -l - cd .. - tar -cvzf test_data_2.tar.gz test_data_2 - ls -l - mkdir test_data_3 - cd test_data_3 - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode_rpc_client-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/faucet-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safe-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode-DebugMem-x86_64.tar.gz - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode_rpc_client-DebugMem-x86_64.tar.gz - ls -l - cd .. 
- tar -cvzf test_data_3.tar.gz test_data_3 - ls -l - df + # - name: Download materials to create two 300MB test_files to be uploaded by client + # shell: bash + # run: | + # mkdir test_data_1 + # cd test_data_1 + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode_rpc_client-qiWithListeners-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/faucet-qilesssubs-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safe-qilesssubs-x86_64.tar.gz + # ls -l + # cd .. + # tar -cvzf test_data_1.tar.gz test_data_1 + # mkdir test_data_2 + # cd test_data_2 + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode-qilesssubs-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode_rpc_client-qilesssubs-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/faucet-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safe-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode-DebugMem-x86_64.tar.gz + # ls -l + # cd .. 
+ # tar -cvzf test_data_2.tar.gz test_data_2 + # ls -l + # mkdir test_data_3 + # cd test_data_3 + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode_rpc_client-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/faucet-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safe-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode-DebugMem-x86_64.tar.gz + # wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode_rpc_client-DebugMem-x86_64.tar.gz + # ls -l + # cd .. + # tar -cvzf test_data_3.tar.gz test_data_3 + # ls -l + # df - - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 + # - name: Build binaries + # run: cargo build --release --bin safenode --bin safe + # timeout-minutes: 30 - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features gifting + # timeout-minutes: 30 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ubuntu-latest - build: true + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ubuntu-latest + # build: true - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z 
"$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - name: Create and fund a wallet to pay for files storage - run: | - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Start a client to upload first file - run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Start a client to upload first file + # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Ensure no leftover cash_notes and payment files - run: | - expected_cash_notes_files="1" - expected_payment_files="0" - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes -l - cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - echo "Find $cash_note_files cash_note files" - if [ $expected_cash_notes_files -lt $cash_note_files ]; then - echo "Got too many cash_note files leftover: $cash_note_files" - exit 1 - fi - ls 
$CLIENT_DATA_PATH/wallet/payments -l - payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - if [ $expected_payment_files -lt $payment_files ]; then - echo "Got too many payment files leftover: $payment_files" - exit 1 - fi - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 - - name: Wait for certain period - run: sleep 300 - timeout-minutes: 6 + # - name: Wait for certain period + # run: sleep 300 + # timeout-minutes: 6 - - name: Use same client to upload second file - run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_2.tar.gz" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 10 + # - name: Use same client to upload second file + # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_2.tar.gz" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 10 - - name: Ensure no leftover cash_notes and payment files - run: | - expected_cash_notes_files="1" - expected_payment_files="0" - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes 
-l - cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) - if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then - echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" - exit 1 - fi - ls $CLIENT_DATA_PATH/wallet/payments -l - payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) - if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then - echo "Got too many payment files leftover: $payment_files" - exit 1 - fi - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) + # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then + # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 - - name: Wait for certain period - run: sleep 300 - timeout-minutes: 6 + # - name: Wait for certain period + # run: sleep 300 + # timeout-minutes: 6 - # Start a different client to avoid local wallet slow down with more payments handled. 
- - name: Start a different client - run: | - pwd - mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - ls -l $SAFE_DATA_PATH - ls -l $SAFE_DATA_PATH/client_first - mkdir $SAFE_DATA_PATH/client - ls -l $SAFE_DATA_PATH - mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - ls -l $CLIENT_DATA_PATH - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - SAFE_DATA_PATH: /home/runner/.local/share/safe - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 25 + # # Start a different client to avoid local wallet slow down with more payments handled. + # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 25 - - name: Use second client to upload third file - run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 10 + # - name: Use second client to upload third file + # run: 
./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 10 - - name: Ensure no leftover cash_notes and payment files - run: | - expected_cash_notes_files="1" - expected_payment_files="0" - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes -l - cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - echo "Find $cash_note_files cash_note files" - if [ $expected_cash_notes_files -lt $cash_note_files ]; then - echo "Got too many cash_note files leftover: $cash_note_files" - exit 1 - fi - ls $CLIENT_DATA_PATH/wallet/payments -l - payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - if [ $expected_payment_files -lt $payment_files ]; then - echo "Got too many payment files leftover: $payment_files" - exit 1 - fi - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: 
safe_test_logs_heavy_replicate_bench - platform: ubuntu-latest + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_heavy_replicate_bench + # platform: ubuntu-latest diff --git a/.github/workflows/nightly-release.yml b/.github/workflows/nightly-release.yml new file mode 100644 index 0000000000..70db60d68e --- /dev/null +++ b/.github/workflows/nightly-release.yml @@ -0,0 +1,251 @@ +name: nightly release + +on: + schedule: + - cron: '0 0 * * *' # Run every night at midnight UTC + workflow_dispatch: # This also allows the workflow to be triggered manually + +env: + WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs + +jobs: + build: + if: ${{ github.repository_owner == 'maidsafe' }} + name: build + environment: stable + env: + FOUNDATION_PK: ${{ vars.FOUNDATION_PK }} + GENESIS_PK: ${{ vars.GENESIS_PK }} + GENESIS_SK: ${{ secrets.GENESIS_SK }} + NETWORK_ROYALTIES_PK: ${{ vars.NETWORK_ROYALTIES_PK }} + PAYMENT_FORWARD_PK: ${{ vars.PAYMENT_FORWARD_PK }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: windows-latest + target: x86_64-pc-windows-msvc + - os: macos-latest + target: x86_64-apple-darwin + - os: macos-latest + target: aarch64-apple-darwin + - os: ubuntu-latest + target: x86_64-unknown-linux-musl + - os: ubuntu-latest + target: arm-unknown-linux-musleabi + - os: ubuntu-latest + target: armv7-unknown-linux-musleabihf + - os: ubuntu-latest + target: aarch64-unknown-linux-musl + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: cargo-bins/cargo-binstall@main + - shell: bash + run: cargo binstall --no-confirm just + + - name: build nightly release artifacts + shell: bash + run: | + just build-release-artifacts "${{ matrix.target }}" "true" + + - uses: actions/upload-artifact@main + with: + name: safe_network-${{ matrix.target }} + path: | + artifacts + 
!artifacts/.cargo-lock + + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 + env: + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Release Failed" + + s3-release: + if: ${{ github.repository_owner == 'maidsafe' }} + name: s3 release + runs-on: ubuntu-latest + needs: [build] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: eu-west-2 + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-pc-windows-msvc + path: artifacts/x86_64-pc-windows-msvc/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-unknown-linux-musl + path: artifacts/x86_64-unknown-linux-musl/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-apple-darwin + path: artifacts/x86_64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-apple-darwin + path: artifacts/aarch64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-arm-unknown-linux-musleabi + path: artifacts/arm-unknown-linux-musleabi/release + - uses: actions/download-artifact@master + with: + name: safe_network-armv7-unknown-linux-musleabihf + path: artifacts/armv7-unknown-linux-musleabihf/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-unknown-linux-musl + path: artifacts/aarch64-unknown-linux-musl/release + + - uses: cargo-bins/cargo-binstall@main + - name: install just + shell: bash + run: cargo binstall --no-confirm just + + - name: remove latest nightly release + shell: bash + run: | + just delete-s3-bin "faucet" "nightly" + just delete-s3-bin "nat-detection" 
"nightly" + just delete-s3-bin "node-launchpad" "nightly" + just delete-s3-bin "safe" "nightly" + just delete-s3-bin "safenode" "nightly" + just delete-s3-bin "safenode_rpc_client" "nightly" + just delete-s3-bin "safenode-manager" "nightly" + just delete-s3-bin "safenodemand" "nightly" + just delete-s3-bin "sn_auditor" "nightly" + + - name: upload binaries to S3 + shell: bash + run: | + version=$(date +"%Y.%m.%d") + just package-bin "faucet" "$version" + just package-bin "nat-detection" "$version" + just package-bin "node-launchpad" "$version" + just package-bin "safe" "$version" + just package-bin "safenode" "$version" + just package-bin "safenode_rpc_client" "$version" + just package-bin "safenode-manager" "$version" + just package-bin "safenodemand" "$version" + just package-bin "sn_auditor" "$version" + just upload-all-packaged-bins-to-s3 + + rm -rf packaged_bins + just package-bin "faucet" "nightly" + just package-bin "nat-detection" "nightly" + just package-bin "node-launchpad" "nightly" + just package-bin "safe" "nightly" + just package-bin "safenode" "nightly" + just package-bin "safenode_rpc_client" "nightly" + just package-bin "safenode-manager" "nightly" + just package-bin "safenodemand" "nightly" + just package-bin "sn_auditor" "nightly" + just upload-all-packaged-bins-to-s3 + + github-release: + if: ${{ github.repository_owner == 'maidsafe' }} + name: github release + runs-on: ubuntu-latest + needs: [s3-release] + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-pc-windows-msvc + path: artifacts/x86_64-pc-windows-msvc/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-unknown-linux-musl + path: artifacts/x86_64-unknown-linux-musl/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-apple-darwin + path: artifacts/x86_64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: 
safe_network-aarch64-apple-darwin + path: artifacts/aarch64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-arm-unknown-linux-musleabi + path: artifacts/arm-unknown-linux-musleabi/release + - uses: actions/download-artifact@master + with: + name: safe_network-armv7-unknown-linux-musleabihf + path: artifacts/armv7-unknown-linux-musleabihf/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-unknown-linux-musl + path: artifacts/aarch64-unknown-linux-musl/release + + - uses: cargo-bins/cargo-binstall@main + - name: install just + shell: bash + run: cargo binstall --no-confirm just + + - name: set package version + shell: bash + run: | + version=$(date +"%Y.%m.%d") + echo "PACKAGE_VERSION=$version" >> $GITHUB_ENV + + - name: package release artifacts + shell: bash + run: just package-all-architectures + + - name: delete existing nightly release + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + run: | + releases=$(gh api repos/${{ github.repository }}/releases --paginate) + echo "$releases" | jq -c '.[]' | while read release; do + tag_name=$(echo $release | jq -r '.tag_name') + release_id=$(echo $release | jq -r '.id') + + if [[ $tag_name == nightly* ]]; then + echo "deleting nightly release $tag_name" + gh api -X DELETE repos/${{ github.repository }}/releases/$release_id + exit 0 + fi + done + + - name: create new nightly release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + with: + tag_name: nightly-${{ env.PACKAGE_VERSION }} + release_name: "${{ env.PACKAGE_VERSION }} Nightly Release" + body: | + Nightly release of the Autonomi binary set, built from the `main` branch. + + These binaries should be compatible with the stable network, but they should be considered experimental. + + For the most reliable experience, prefer the latest stable release. 
+ draft: false + prerelease: true + + - name: upload artifacts as assets + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + shell: bash + run: | + ( + cd packaged_architectures + ls | xargs gh release upload nightly-${{ env.PACKAGE_VERSION }} + ) + + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 + env: + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Nightly Release Failed" \ No newline at end of file diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 7165866f79..aac0ac9ad4 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -154,7 +154,7 @@ jobs: - name: Run network tests timeout-minutes: 25 - run: cargo test --release -p sn_networking + run: cargo test --release -p sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 @@ -199,11 +199,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode --bin faucet + run: cargo build --release --features=local --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run + run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run env: # only set the target dir for windows to bypass the linker issue. 
# happens if we build the node manager via testnet action @@ -221,21 +221,21 @@ jobs: build: true - name: execute the sequential transfers test - run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} SN_LOG: "all" timeout-minutes: 10 - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} SN_LOG: "all" timeout-minutes: 10 - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 25 @@ -277,11 +277,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode --bin faucet + run: cargo build --release --features=local --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run env: # only set the target dir for windows to bypass the linker issue. 
# happens if we build the node manager via testnet action @@ -300,7 +300,7 @@ jobs: build: true - name: execute the spend simulation test - run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 25 @@ -341,11 +341,11 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features=local-discovery,distribution --bin safenode --bin faucet + run: cargo build --release --features=local,distribution --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release --features=local-discovery,distribution --no-run + run: cargo test --release --features=local,distribution --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -373,7 +373,7 @@ jobs: fi - name: execute token_distribution tests - run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 + run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} @@ -412,11 +412,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode --bin faucet timeout-minutes: 30 - name: Build churn tests - run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run + run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run env: # only set the target dir for windows to bypass the linker issue. 
# happens if we build the node manager via testnet action @@ -434,7 +434,7 @@ jobs: build: true - name: Chunks data integrity during nodes churn (during 10min) (in theory) - run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture + run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture env: TEST_DURATION_MINS: 60 TEST_CHURN_CYCLES: 6 @@ -537,11 +537,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode --bin faucet timeout-minutes: 30 - name: Build data location and routing table tests - run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + run: cargo test --release -p sn_node --features=local --test verify_data_location --test verify_routing_table --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -559,20 +559,20 @@ jobs: build: true - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 diff --git a/.github/workflows/nightly_wan.yml b/.github/workflows/nightly_wan.yml index 78c1ff756f..9c84f58488 100644 --- a/.github/workflows/nightly_wan.yml +++ b/.github/workflows/nightly_wan.yml @@ -1,13 +1,16 @@ name: Nightly -- Full WAN Network Tests on: -# todo: this is totally broken atm. Fix and re-enable. -# schedule: -# - cron: "0 0 * * *" + schedule: + - cron: "0 0 * * *" + # enable as below for testing purpose. + # pull_request: + # branches: ["*"] workflow_dispatch: env: CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI. + NETWORK_NAME: DEV-01 WORKFLOW_URL: https://github.com/maidsafe/stableset_net/actions/runs jobs: e2e: @@ -29,31 +32,30 @@ jobs: run: cargo build --release --bin safe timeout-minutes: 30 - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main + - name: setup testnet-deploy + uses: maidsafe/sn-testnet-control-action/init-testnet-deploy@main with: - action: create - re-attempts: 3 - rust-log: debug ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} aws-region: eu-west-2 do-token: ${{ secrets.SN_TESTNET_DO_PAT }} ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 + + - name: launch ${{ env.NETWORK_NAME }} + uses: maidsafe/sn-testnet-control-action/launch-network@main + with: + ansible-forks: ${{ env.ANSIBLE_FORKS }} + environment-type: development + node-vm-count: 10 
node-count: 20 - vm-count: 1 + uploader-vm-count: 0 + bootstrap-node-vm-count: 0 + log-format: json + network-name: ${{ env.NETWORK_NAME }} provider: digital-ocean - testnet-name: NightlyE2E - # if we were to run on a PR, use the below - # safe-network-user: ${{ github.actor }}" - # safe-network-branch: ${{ github.event.pull_request.head.ref }} - # Specify custom branch to prevent the deployer from fetching the latest release. - # The latest release contains the `network-contacts` feature turned on. - safe-network-user: maidsafe safe-network-branch: main + safe-network-user: maidsafe - name: Check env variables shell: bash @@ -61,10 +63,17 @@ jobs: echo "Peer is $SAFE_PEERS" echo "Deployment inventory is $SN_INVENTORY" + - name: start faucet + uses: maidsafe/sn-testnet-control-action/start-faucet@main + with: + network-name: ${{ env.NETWORK_NAME }} + - name: Obtain the funds from the faucet run: | + set -e + # read the inventory file - inventory_path=/home/runner/.local/share/safe/testnet-deploy/NightlyE2E-inventory.json + inventory_path=/home/runner/.local/share/safe/testnet-deploy/${{ env.NETWORK_NAME }}-inventory.json echo "Inventory Path: $inventory_path" faucet_address=$(jq -r '.faucet_address' $inventory_path) cargo run --bin safe --release -- wallet get-faucet ${faucet_address} @@ -73,48 +82,44 @@ jobs: timeout-minutes: 2 - name: Start a client to carry out chunk actions - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick env: SN_LOG: "all" timeout-minutes: 2 - name: Start a client to create a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao env: SN_LOG: "all" timeout-minutes: 2 - name: Start a client to 
get a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao env: SN_LOG: "all" timeout-minutes: 2 - name: Start a client to edit a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood env: SN_LOG: "all" timeout-minutes: 2 - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyE2E - safe-network-user: maidsafe - safe-network-branch: main + # - name: Fetch network logs + # uses: maidsafe/sn-testnet-control-action/fetch-logs@main + # with: + # re-attempts: 3 + # rust-log: debug + # provider: digital-ocean + # network-name: ${{ env.NETWORK_NAME }} - name: Upload local logs if: always() @@ -126,554 +131,517 @@ jobs: ~/.local/share/safe/*/*/*.log* ~/.local/share/safe/client/logs/*/*.log* - - name: Stop the WAN network + - name: destroy network if: always() - uses: maidsafe/sn-testnet-action@main + uses: maidsafe/sn-testnet-control-action/destroy-network@main with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ 
secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 + network-name: ${{ env.NETWORK_NAME }} provider: digital-ocean - testnet-name: NightlyE2E - safe-network-user: maidsafe - safe-network-branch: main - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly E2E Test Run Failed" - - spend_test: - name: Spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run - timeout-minutes: 40 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - name: execute the sequential 
transfers test - run: cargo test --release -p sn_node --test sequential_transfers -- --nocapture --test-threads=1 + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 env: - SN_LOG: "all" - timeout-minutes: 45 - - - name: execute the storage payment tests - run: cargo test --release -p sn_node --test storage_payments -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - timeout-minutes: 45 - - - name: execute the double spend tests - run: cargo test --release -p sn_node --test double_spend -- --nocapture --test-threads=1 - timeout-minutes: 45 - - - - name: execute the spend simulation tests - run: cargo test --release -p sn_node --test spend_simulation -- --nocapture --test-threads=1 - timeout-minutes: 45 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlySpendTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ 
secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Spend Test Run Failed" - - churn: - name: Network churning tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - wan_logs_path: /home/runner/sn-testnet-deploy/logs - local_safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - name: install ripgrep - run: sudo apt-get -y install ripgrep - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build churn tests - run: cargo test --release -p sn_node --test data_with_churn --no-run - timeout-minutes: 30 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - 
aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --test data_with_churn -- --nocapture - env: - # TEST_DURATION_MINS: 60 - # TEST_CHURN_CYCLES: 6 - # SN_LOG: "all" - # todo: lower time for testing - TEST_DURATION_MINS: 10 - TEST_CHURN_CYCLES: 2 - SN_LOG: "all" - timeout-minutes: 90 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlyChurnTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ 
secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - # TODO: re-enable the below scripts once we have proper way to restart nodes. - # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same - # log location and the logs are over written. Hence the scripts might give false outputs. - - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run : | - # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # echo "Node dir count is $node_count" - # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - - # # if [ $restart_count -lt $node_count ]; then - # # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # # exit 1 - # # fi - - # - name: Verify data replication using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit 
count only - # # then check we have an expected level of replication - # # TODO: make this use an env var, or relate to testnet size - # run : | - # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Carried out $fetching_attempt_count fetching attempts" - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # if [ $fetching_attempt_count -lt $node_count ]; then - # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" - # exit 1 - # fi - - # Only error out after uploading the logs - - name: Don't log raw data - if: always() && matrix.os != 'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - - # sanity check - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for local safe path" - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! 
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for wan logs path" - fi - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Churn Test Run Failed" - - verify_data_location_routing_table: - name: Verify data location and Routing Table - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - wan_logs_path: /home/runner/sn-testnet-deploy/logs - local_safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - name: install ripgrep - run: sudo apt-get -y install ripgrep - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build data location and routing table tests - run: cargo test --release -p sn_node --test verify_data_location --test verify_routing_table --no-run - timeout-minutes: 30 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: 
sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - timeout-minutes: 5 - - - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --test verify_data_location -- --nocapture - env: - SN_LOG: "all" - timeout-minutes: 90 - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - timeout-minutes: 5 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlyDataLocationTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - 
aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - safe-network-branch: main - - # TODO: re-enable the below scripts once we have proper way to restart nodes. - # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same - # log location and the logs are over written. Hence the scripts might give false outputs. - - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run : | - # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # echo "Node dir count is $node_count" - # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - - # # if [ $restart_count -lt $node_count ]; then - # # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # # exit 1 - # # fi - - # Only error out after uploading the logs - - name: Don't log raw data - if: always() && matrix.os != 
'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - - # sanity check - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for local safe path" - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! 
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' - then - echo echo "Sanity check pass for wan logs path" - fi - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Data Location Test Run Failed" + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Nightly E2E Test Run Failed" + + # spend_test: + # name: Spend tests against network + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run + # timeout-minutes: 40 + + # - name: setup testnet-deploy + # uses: maidsafe/sn-testnet-control-action/init-testnet-deploy@main + # with: + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + + # - name: launch ${{ env.NETWORK_NAME }} + # uses: maidsafe/sn-testnet-control-action/launch-network@main + # with: + # ansible-forks: ${{ env.ANSIBLE_FORKS }} + # beta-encryption-key: ${{ env.DEFAULT_PAYMENT_FORWARD_SK }} + # environment-type: development 
+ # faucet-version: ${{ env.FAUCET_VERSION }} + # log-format: json + # network-name: ${{ env.NETWORK_NAME }} + # network-contacts-file-name: ${{ env.NETWORK_CONTACTS_FILE_NAME }} + # provider: digital-ocean + # safe-network-branch: main + # safe-network-user: maidsafe + + # - name: Check env variables + # shell: bash + # run: | + # echo "Peer is $SAFE_PEERS" + # echo "Deployment inventory is $SN_INVENTORY" + + # - name: execute the sequential transfers test + # run: cargo test --release -p sn_node --test sequential_transfers -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # timeout-minutes: 45 + + # - name: execute the storage payment tests + # run: cargo test --release -p sn_node --test storage_payments -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # timeout-minutes: 45 + + # - name: execute the double spend tests + # run: cargo test --release -p sn_node --test double_spend -- --nocapture --test-threads=1 + # timeout-minutes: 45 + + # - name: execute the spend simulation tests + # run: cargo test --release -p sn_node --test spend_simulation -- --nocapture --test-threads=1 + # timeout-minutes: 45 + + # - name: Small wait to allow reward receipt + # run: sleep 30 + # timeout-minutes: 1 + + # - name: Fetch network logs + # uses: ermineJose/sn-testnet-control-action/fetch-logs@feat-add_fetch-logs-action + # with: + # re-attempts: 3 + # rust-log: debug + # provider: digital-ocean + # testnet-name: ${{ env.NETWORK_NAME }} + + # - name: Upload local logs + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: local_logs_NightlySpendTest + # path: | + # ~/.local/share/safe/node/*/logs/*.log* + # ~/.local/share/safe/*/*/*.log* + # ~/.local/share/safe/client/logs/*/*.log* + + # - name: destroy network + # uses: maidsafe/sn-testnet-control-action/destroy-network@main + # with: + # network-name: ${{ env.NETWORK_NAME }} + # provider: digital-ocean + + # - name: post notification to slack on failure + # if: ${{ failure() }} + # 
uses: bryannice/gitactions-slack-notification@2.0.0 + # env: + # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + # SLACK_TITLE: "Nightly Spend Test Run Failed" + + # churn: + # name: Network churning tests + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # wan_logs_path: /home/runner/sn-testnet-deploy/logs + # local_safe_path: /home/runner/.local/share/safe + # # - os: windows-latest + # # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # # - os: macos-latest + # # node_data_path: /Users/runner/Library/Application Support/safe/node + # # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 + # + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # + # - name: install ripgrep + # run: sudo apt-get -y install ripgrep + # + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + # + # - name: Build churn tests + # run: cargo test --release -p sn_node --test data_with_churn --no-run + # timeout-minutes: 30 + # + # - name: Start a WAN network + # uses: maidsafe/sn-testnet-action@main + # with: + # action: create + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # security-group-id: sg-0d47df5b3f0d01e2a + # subnet-id: subnet-018f2ab26755df7f9 + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: 
Check env variables + # shell: bash + # run: | + # echo "Peer is $SAFE_PEERS" + # echo "Deployment inventory is $SN_INVENTORY" + # + # - name: Chunks data integrity during nodes churn + # run: cargo test --release -p sn_node --test data_with_churn -- --nocapture + # env: + # # TEST_DURATION_MINS: 60 + # # TEST_CHURN_CYCLES: 6 + # # SN_LOG: "all" + # # todo: lower time for testing + # TEST_DURATION_MINS: 10 + # TEST_CHURN_CYCLES: 2 + # SN_LOG: "all" + # timeout-minutes: 90 + # + # - name: Fetch network logs + # if: always() + # uses: maidsafe/sn-testnet-action@main + # with: + # action: logs + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: Upload local logs + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: local_logs_NightlyChurnTest + # path: | + # ~/.local/share/safe/node/*/logs/*.log* + # ~/.local/share/safe/*/*/*.log* + # ~/.local/share/safe/client/logs/*/*.log* + # + # - name: Stop the WAN network + # if: always() + # uses: maidsafe/sn-testnet-action@main + # with: + # action: destroy + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # 
testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # # TODO: re-enable the below scripts once we have proper way to restart nodes. + # # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same + # # log location and the logs are over written. Hence the scripts might give false outputs. + # + # # - name: Verify restart of nodes using rg + # # shell: bash + # # timeout-minutes: 1 + # # # get the counts, then the specific line, and then the digit count only + # # # then check we have an expected level of restarts + # # # TODO: make this use an env var, or relate to testnet size + # # run : | + # # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "Restart $restart_count nodes" + # # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "PeerRemovedFromRoutingTable $peer_removed times" + # # if [ $peer_removed -lt $restart_count ]; then + # # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + # # exit 1 + # # fi + # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) + # # echo "Node dir count is $node_count" + # # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here + # + # # # if [ $restart_count -lt $node_count ]; then + # # # echo "Restart count of: $restart_count is less than the node count of: $node_count" + # # # exit 1 + # # # fi + # + # # - name: Verify data replication using rg + # # shell: bash + # # timeout-minutes: 1 + # # # get the counts, then the specific line, and then the digit count only + # # # then check we have an expected level of replication + # # # TODO: make this use an env var, or relate to testnet size + # # run : | 
+ # # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "Carried out $fetching_attempt_count fetching attempts" + # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) + # # if [ $fetching_attempt_count -lt $node_count ]; then + # # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" + # # exit 1 + # # fi + # + # # Only error out after uploading the logs + # - name: Don't log raw data + # if: always() && matrix.os != 'windows-latest' # causes error + # shell: bash + # timeout-minutes: 10 + # run: | + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # + # # sanity check + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo "Sanity check pass for local safe path" + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! 
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo "Sanity check pass for wan logs path" + # fi + # + # # - name: post notification to slack on failure + # # if: ${{ failure() }} + # # uses: bryannice/gitactions-slack-notification@2.0.0 + # # env: + # # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + # # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + # # SLACK_TITLE: "Nightly Churn Test Run Failed" + # + # verify_data_location_routing_table: + # name: Verify data location and Routing Table + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # wan_logs_path: /home/runner/sn-testnet-deploy/logs + # local_safe_path: /home/runner/.local/share/safe + # # - os: windows-latest + # # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # # - os: macos-latest + # # node_data_path: /Users/runner/Library/Application Support/safe/node + # # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 + # + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # + # - name: install ripgrep + # run: sudo apt-get -y install ripgrep + # + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + # + # - name: Build data location and routing table tests + # run: cargo test --release -p sn_node --test verify_data_location --test verify_routing_table --no-run + # timeout-minutes: 30 + # + # - name: Start a WAN network + # uses: maidsafe/sn-testnet-action@main + # with: + # action: create + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # 
do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # security-group-id: sg-0d47df5b3f0d01e2a + # subnet-id: subnet-018f2ab26755df7f9 + # node-count: 20 + # vm-count: 1 + # testnet-name: NightlyDataLocationTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: Check env variables + # shell: bash + # run: | + # echo "Peer is $SAFE_PEERS" + # echo "Deployment inventory is $SN_INVENTORY" + # + # - name: Verify the Routing table of the nodes + # run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture + # timeout-minutes: 5 + # + # - name: Verify the location of the data on the network + # run: cargo test --release -p sn_node --test verify_data_location -- --nocapture + # env: + # SN_LOG: "all" + # timeout-minutes: 90 + # + # - name: Verify the routing tables of the nodes + # run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture + # timeout-minutes: 5 + # + # - name: Fetch network logs + # if: always() + # uses: maidsafe/sn-testnet-action@main + # with: + # action: logs + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyDataLocationTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: Upload local logs + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: local_logs_NightlyDataLocationTest + # path: | + # ~/.local/share/safe/node/*/logs/*.log* + # ~/.local/share/safe/*/*/*.log* + # ~/.local/share/safe/client/logs/*/*.log* + # + # - name: Stop the WAN network + # if: always() + # uses: 
maidsafe/sn-testnet-action@main + # with: + # action: destroy + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyDataLocationTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # # TODO: re-enable the below scripts once we have proper way to restart nodes. + # # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same + # # log location and the logs are over written. Hence the scripts might give false outputs. + # + # # - name: Verify restart of nodes using rg + # # shell: bash + # # timeout-minutes: 1 + # # # get the counts, then the specific line, and then the digit count only + # # # then check we have an expected level of restarts + # # # TODO: make this use an env var, or relate to testnet size + # # run : | + # # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "Restart $restart_count nodes" + # # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "PeerRemovedFromRoutingTable $peer_removed times" + # # if [ $peer_removed -lt $restart_count ]; then + # # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + # # exit 1 + # # fi + # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) + # # echo "Node dir count is $node_count" + # # # TODO: reenable this once the testnet dir creation is 
tidied up to avoid a large count here + # + # # # if [ $restart_count -lt $node_count ]; then + # # # echo "Restart count of: $restart_count is less than the node count of: $node_count" + # # # exit 1 + # # # fi + # + # # Only error out after uploading the logs + # - name: Don't log raw data + # if: always() && matrix.os != 'windows-latest' # causes error + # shell: bash + # timeout-minutes: 10 + # run: | + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # + # # sanity check + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo "Sanity check pass for local safe path" + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! 
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo echo "Sanity check pass for wan logs path" + # fi + # + # # - name: post notification to slack on failure + # # if: ${{ failure() }} + # # uses: bryannice/gitactions-slack-notification@2.0.0 + # # env: + # # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + # # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + # # SLACK_TITLE: "Nightly Data Location Test Run Failed" diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml index ea49a67372..55cd701cbf 100644 --- a/.github/workflows/node_man_tests.yml +++ b/.github/workflows/node_man_tests.yml @@ -35,122 +35,122 @@ jobs: - shell: bash run: cargo test --lib --package sn-node-manager - node-manager-user-mode-e2e-tests: - name: user-mode e2e - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - { os: ubuntu-latest } - - { os: macos-latest } - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - shell: bash - run: | - cargo test --package sn-node-manager --release --test e2e -- --nocapture - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: 
node_man_tests_user_mode - platform: ${{ matrix.os }} - - node-manager-e2e-tests: - name: system-wide e2e - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - { os: ubuntu-latest, elevated: sudo -E env PATH="$PATH" } - - { os: macos-latest, elevated: sudo -E } - - { os: windows-latest } - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - shell: bash - if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' - run: | - ${{ matrix.elevated }} rustup default stable - ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture - - # Powershell step runs as admin by default. 
- - name: run integration test in powershell - if: matrix.os == 'windows-latest' - shell: pwsh - run: | - curl -L -o WinSW.exe $env:WINSW_URL - - New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" - Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" - $env:PATH += ";$env:GITHUB_WORKSPACE\bin" - - cargo test --release --package sn-node-manager --test e2e -- --nocapture - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: node_man_tests_system_wide - platform: ${{ matrix.os }} + # node-manager-user-mode-e2e-tests: + # name: user-mode e2e + # runs-on: ${{ matrix.os }} + # strategy: + # fail-fast: false + # matrix: + # include: + # - { os: ubuntu-latest } + # - { os: macos-latest } + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --bin safenode --bin faucet + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - shell: bash + # run: | + # cargo test --package sn-node-manager --release --test e2e -- --nocapture + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: node_man_tests_user_mode + # platform: ${{ matrix.os }} + + # node-manager-e2e-tests: + # name: system-wide e2e + # runs-on: ${{ matrix.os }} + # strategy: + 
# fail-fast: false + # matrix: + # include: + # - { os: ubuntu-latest, elevated: sudo -E env PATH="$PATH" } + # - { os: macos-latest, elevated: sudo -E } + # - { os: windows-latest } + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --bin safenode --bin faucet + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - shell: bash + # if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' + # run: | + # ${{ matrix.elevated }} rustup default stable + # ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture + + # # Powershell step runs as admin by default. 
+ # - name: run integration test in powershell + # if: matrix.os == 'windows-latest' + # shell: pwsh + # run: | + # curl -L -o WinSW.exe $env:WINSW_URL + + # New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" + # Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" + # $env:PATH += ";$env:GITHUB_WORKSPACE\bin" + + # cargo test --release --package sn-node-manager --test e2e -- --nocapture + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: node_man_tests_system_wide + # platform: ${{ matrix.os }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 810a8dc264..0d1ecc79fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,22 +7,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* -## 2024-10-08 +## 2024-10-22 -### Network +Unfortunately the entry for this release will not have fully detailed changes. This release is +special in that it's very large and moves us to a new, EVM-based payments system. The Github Release +description has a list of all the merged PRs. If you want more detail, consult the PR list. Normal +service will resume for subsequent releases. -#### Changed +Here is a brief summary of the changes: -- Optimize auditor tracking by not to re-attempt fetched spend. -- Optimize auditor tracking function by using DashMap and stream. +- A new `autonomi` CLI that uses EVM payments and replaces the previous `safe` CLI. +- A new `autonomi` API that replaces `sn_client` with a simpler interface. +- The node has been changed to use EVM payments. +- The node runs without a wallet. This increases security and removes the need for forwarding. +- Data is paid for through an EVM smart contract. Payment proofs are not linked to the original + data. +- Payment royalties have been removed, resulting in less centralization and fees. 
-## 2024-10-07 +## 2024-10-08 ### Network #### Changed -- The auditor's webservice has new endpoints that allow it to be restarted or terminated +- Optimize auditor tracking by not to re-attempt fetched spend. +- Optimize auditor tracking function by using DashMap and stream. ## 2024-10-07 diff --git a/Cargo.lock b/Cargo.lock index f35188f13d..81f3daed4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,18 +33,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.1", -] - [[package]] name = "aes" version = "0.8.4" @@ -52,7 +40,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -63,8 +51,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "ghash", "subtle", @@ -77,8 +65,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae0784134ba9375416d469ec31e7c5f9fa94405049cf08c5ce5b4698be673e0d" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "polyval", "subtle", @@ -128,6 +116,533 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" +dependencies = [ + "alloy-consensus", + "alloy-contract", + 
"alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-network", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-signer", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", +] + +[[package]] +name = "alloy-chains" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db" +dependencies = [ + "num_enum", + "strum", +] + +[[package]] +name = "alloy-consensus" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "auto_impl", + "c-kzg", + "derive_more", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror", +] + +[[package]] +name = "alloy-core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "const-hex", + "itoa", + "serde", + 
"serde_json", + "winnow", +] + +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eips" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more", + "once_cell", + "serde", + "sha2 0.10.8", +] + +[[package]] +name = "alloy-genesis" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-node-bindings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "k256", + "rand 0.8.5", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "url", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hashbrown 0.14.5", + "hex-literal", + "indexmap 2.5.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash", + "serde", + "sha3 0.10.8", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-node-bindings", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types-anvil", + 
"alloy-rpc-types-eth", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "pin-project", + "reqwest 0.12.7", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest 0.12.7", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-anvil", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-anvil" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.4.2" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "derive_more", + "itertools 0.13.0", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve 0.13.8", + "k256", + "thiserror", +] + +[[package]] +name = "alloy-signer-local" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + 
"indexmap 2.5.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.77", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.77", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower 0.5.1", + "tracing", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport-http" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.7", + "serde_json", + "tower 0.5.1", + "tracing", + "url", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -210,6 +725,130 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + 
"proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -252,7 +891,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "synstructure", ] @@ -264,7 +903,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -336,9 +975,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -347,37 +986,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.6" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "async-trait" -version = "0.1.83" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] -name = "asynchronous-codec" -version = "0.6.2" +name = "async-trait" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ - "bytes", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] @@ -415,45 +1041,97 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "autocfg" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.1.2" +version = "0.2.0" dependencies 
= [ "bip39", "blsttc", "bytes", - "libp2p", + "console_error_panic_hook", + "const-hex", + "evmlib", + "eyre", + "futures", + "hex 0.4.3", + "instant", + "js-sys", + "libp2p 0.54.1", "rand 0.8.5", "rmp-serde", "self_encryption", "serde", - "sn_client", + "serde-wasm-bindgen", + "sha2 0.10.8", + "sn_bls_ckd", + "sn_curv", + "sn_evm", + "sn_logging", + "sn_networking", + "sn_peers_acquisition", "sn_protocol", "sn_registers", - "sn_transfers", + "test_utils", "thiserror", + "tiny_http", "tokio", "tracing", "tracing-subscriber", + "tracing-web", "walkdir", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test", "xor_name", ] +[[package]] +name = "autonomi-cli" +version = "0.1.1" +dependencies = [ + "autonomi", + "clap", + "color-eyre", + "criterion", + "dirs-next", + "eyre", + "indicatif", + "rand 0.8.5", + "rayon", + "sn_build_info", + "sn_logging", + "sn_peers_acquisition", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "axum" version = "0.6.20" @@ -477,7 +1155,7 @@ dependencies = [ "rustversion", "serde", "sync_wrapper 0.1.2", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", ] @@ -540,6 +1218,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -564,12 +1248,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bech32" -version = "0.10.0-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" - [[package]] name = "better-panic" version = "0.3.0" @@ 
-595,7 +1273,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes 0.11.0", + "bitcoin_hashes", "serde", "unicode-normalization", ] @@ -615,43 +1293,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = "bitcoin" -version = "0.31.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" -dependencies = [ - "base64 0.21.7", - "bech32", - "bitcoin-internals", - "bitcoin_hashes 0.13.0", - "hex-conservative", - "hex_lit", - "secp256k1 0.28.2", -] - -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" -[[package]] -name = "bitcoin_hashes" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals", - "hex-conservative", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -719,16 +1366,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -773,8 +1410,8 @@ checksum = 
"1ff3694b352ece02eb664a09ffb948ee69b35afa2e6ac444a6b8cb9d515deebd" dependencies = [ "blst", "byte-slice-cast", - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "pairing", "rand_core 0.6.4", "serde", @@ -789,8 +1426,8 @@ checksum = "1186a39763321a0b73d1a10aa4fc067c5d042308509e8f6cc31d2c2a7ac61ac2" dependencies = [ "blst", "blstrs", - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "hex 0.4.3", "hex_fmt", "pairing", @@ -903,6 +1540,21 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex 0.4.3", + "libc", + "once_cell", + "serde", +] + [[package]] name = "camino" version = "1.1.9" @@ -929,7 +1581,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -962,7 +1614,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -976,9 +1628,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.24" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -998,7 +1650,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -1010,7 +1662,7 @@ checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", 
"chacha20", - "cipher 0.4.4", + "cipher", "poly1305", "zeroize", ] @@ -1025,6 +1677,7 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", "windows-targets 0.52.6", ] @@ -1062,15 +1715,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -1084,9 +1728,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.19" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", "clap_derive", @@ -1094,9 +1738,9 @@ dependencies = [ [[package]] name = "clap-verbosity-flag" -version = "2.2.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e099138e1807662ff75e2cebe4ae2287add879245574489f9b1588eb5e5564ed" +checksum = "63d19864d6b68464c59f7162c9914a0b569ddc2926b4a2d71afe62a9738eff53" dependencies = [ "clap", "log", @@ -1104,9 +1748,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.19" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstream", "anstyle", @@ -1114,19 +1758,19 @@ dependencies = [ "strsim", "terminal_size", "unicase", - "unicode-width 0.2.0", + "unicode-width", ] [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1246,7 +1890,7 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width 0.1.14", + "unicode-width", "windows-sys 0.52.0", ] @@ -1260,6 +1904,19 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "const-hex" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex 0.4.3", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1359,8 +2016,6 @@ version = "7.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "387808c885b79055facbd4b2e806a683fe1bc37abc7dfa5fea1974ad2d4137b0" dependencies = [ - "num", - "quickcheck", "serde", "tiny-keccak", ] @@ -1496,6 +2151,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1523,7 +2190,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1550,7 +2217,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1563,7 +2230,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1585,7 +2252,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "synstructure", ] @@ -1610,7 +2277,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1621,7 +2288,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1714,6 +2381,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -1728,16 +2407,24 @@ dependencies = [ ] [[package]] -name = "dialoguer" -version = "0.11.0" +name = "derive_more" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror", - "zeroize", + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "unicode-xid", ] [[package]] @@ -1777,6 +2464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ 
-1851,7 +2539,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1869,21 +2557,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "dot-generator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aaac7ada45f71873ebce336491d1c1bc4a7c8042c7cea978168ad59e805b871" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "dot-structures" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "675e35c02a51bb4d4618cb4885b3839ce6d1787c97b664474d9208d074742e20" - [[package]] name = "downcast" version = "0.11.0" @@ -1909,11 +2582,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der 0.6.1", - "elliptic-curve", - "rfc6979", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", "signature 1.6.4", ] +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der 0.7.9", + "digest 0.10.7", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -1951,16 +2638,35 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", "der 0.6.1", "digest 0.10.7", - "ff", + "ff 0.12.1", "generic-array 0.14.7", - "group", + "group 0.12.1", "pkcs8 0.9.0", "rand_core 0.6.4", - "sec1", + "sec1 0.3.0", + 
"subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.5", + "digest 0.10.7", + "ff 0.13.0", + "generic-array 0.14.7", + "group 0.13.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -1989,7 +2695,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2039,6 +2745,32 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "evm_testnet" +version = "0.1.1" +dependencies = [ + "clap", + "dirs-next", + "evmlib", + "sn_evm", + "tokio", +] + +[[package]] +name = "evmlib" +version = "0.1.1" +dependencies = [ + "alloy", + "dirs-next", + "getrandom 0.2.15", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "eyre" version = "0.6.12" @@ -2078,6 +2810,17 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.12.1" @@ -2089,6 +2832,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff-zeroize" version = "0.6.3" @@ -2155,6 +2908,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -2163,9 +2928,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "miniz_oxide 0.8.0", @@ -2306,7 +3071,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2371,6 +3136,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "generic-array" version = "0.12.4" @@ -2388,6 +3159,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -2702,7 +3474,7 @@ checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2980,31 +3752,26 @@ dependencies = [ ] [[package]] -name = "graphviz-rust" -version = "0.9.0" +name = "group" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c33d03804e2ce21db5821f2beb4e54f844a8f90326e6bd99a1771dc54aef427" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "dot-generator", - "dot-structures", - 
"into-attr", - "into-attr-derive", - "pest", - "pest_derive", + "ff 0.12.1", "rand 0.8.5", - "tempfile", + "rand_core 0.6.4", + "rand_xorshift 0.3.0", + "subtle", ] [[package]] name = "group" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", - "rand 0.8.5", + "ff 0.13.0", "rand_core 0.6.4", - "rand_xorshift 0.3.0", "subtle", ] @@ -3020,7 +3787,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util 0.7.12", @@ -3057,14 +3824,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", + "serde", ] -[[package]] -name = "hashbrown" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" - [[package]] name = "headers" version = "0.3.9" @@ -3141,10 +3903,10 @@ dependencies = [ ] [[package]] -name = "hex-conservative" -version = "0.1.2" +name = "hex-literal" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hex_fmt" @@ -3152,12 +3914,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - [[package]] name = "hickory-proto" version = "0.24.1" @@ -3310,9 +4066,9 @@ dependencies 
= [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -3425,9 +4181,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -3438,6 +4194,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] @@ -3555,6 +4312,26 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "indenter" version = "0.3.3" @@ -3567,18 +4344,20 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "hashbrown 0.12.3", + "serde", ] [[package]] name = "indexmap" -version = "2.6.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = 
"68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.14.5", + "serde", ] [[package]] @@ -3592,7 +4371,7 @@ dependencies = [ "number_prefix", "portable-atomic", "tokio", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -3602,7 +4381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.5.0", "is-terminal", "itoa", "log", @@ -3630,7 +4409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -3645,28 +4424,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "into-attr" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b48c537e49a709e678caec3753a7dba6854661a1eaa27675024283b3f8b376" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "into-attr-derive" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecac7c1ae6cd2c6a3a64d1061a8bdc7f52ff62c26a831a2301e54c1b5d70d5b1" -dependencies = [ - "dot-generator", - "dot-structures", - "into-attr", - "quote", - "syn 1.0.109", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -3764,6 +4521,19 @@ dependencies = [ "serde", ] +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "once_cell", + "sha2 0.10.8", +] + [[package]] name = "keccak" version = "0.1.5" @@ -3773,6 +4543,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3781,9 +4561,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libm" @@ -3803,22 +4583,47 @@ dependencies = [ "futures-timer", "getrandom 0.2.15", "instant", - "libp2p-allow-block-list", + "libp2p-allow-block-list 0.3.0", + "libp2p-connection-limits 0.3.1", + "libp2p-core 0.41.3", + "libp2p-identify 0.44.2", + "libp2p-identity", + "libp2p-kad 0.45.3", + "libp2p-metrics 0.14.1", + "libp2p-swarm 0.44.2", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom 0.2.15", + "libp2p-allow-block-list 0.4.0", "libp2p-autonat", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-gossipsub", - "libp2p-identify", + "libp2p-identify 0.45.0", "libp2p-identity", - "libp2p-kad", + "libp2p-kad 0.46.2", "libp2p-mdns", - "libp2p-metrics", + "libp2p-metrics 0.15.0", "libp2p-noise", "libp2p-quic", "libp2p-relay", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "libp2p-tcp", "libp2p-upnp", "libp2p-websocket", @@ -3836,31 +4641,49 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" dependencies = [ - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] [[package]] name = "libp2p-autonat" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95151726170e41b591735bf95c42b888fe4aa14f65216a9fbf0edcc04510586" +checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" dependencies = [ "async-trait", - "asynchronous-codec 0.6.2", + "asynchronous-codec", + "bytes", + "either", "futures", + "futures-bounded", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", - "quick-protobuf-codec 0.2.0", + "quick-protobuf-codec", "rand 0.8.5", + "rand_core 0.6.4", + "thiserror", "tracing", + "void", + "web-time", ] [[package]] @@ -3869,9 +4692,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" dependencies = [ - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -3903,16 +4738,44 @@ dependencies = [ "web-time", ] +[[package]] +name 
= "libp2p-core" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-dns" -version = "0.41.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot", "smallvec", @@ -3921,12 +4784,12 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.46.1" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d665144a616dadebdc5fff186b1233488cdcd8bfb1223218ff084b6d052c94f7" +checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" dependencies = [ - "asynchronous-codec 0.7.0", - "base64 0.21.7", + "asynchronous-codec", + "base64 0.22.1", "byteorder", "bytes", "either", @@ -3935,19 +4798,19 @@ dependencies = [ "futures-ticker", "getrandom 0.2.15", "hex_fmt", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "regex", "sha2 0.10.8", "smallvec", "tracing", "void", + "web-time", ] [[package]] @@ -3956,17 +4819,40 @@ version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror", + "tracing", + "void", +] + +[[package]] +name = "libp2p-identify" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +dependencies = [ + "asynchronous-codec", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "lru", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "smallvec", "thiserror", "tracing", @@ -3998,7 +4884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" dependencies = [ "arrayvec", - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "either", "fnv", @@ -4006,11 +4892,39 @@ dependencies = [ "futures-bounded", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "sha2 0.10.8", + "smallvec", + "thiserror", + "tracing", + "uint", + "void", +] + +[[package]] +name = "libp2p-kad" +version = "0.46.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +dependencies = [ + "arrayvec", + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + 
"quick-protobuf-codec", "rand 0.8.5", "sha2 0.10.8", "smallvec", @@ -4018,21 +4932,22 @@ dependencies = [ "tracing", "uint", "void", + "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "rand 0.8.5", "smallvec", "socket2", @@ -4049,27 +4964,44 @@ checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" dependencies = [ "futures", "instant", - "libp2p-core", - "libp2p-identify", + "libp2p-core 0.41.3", + "libp2p-identify 0.44.2", + "libp2p-identity", + "libp2p-kad 0.45.3", + "libp2p-swarm 0.44.2", + "pin-project", + "prometheus-client", +] + +[[package]] +name = "libp2p-metrics" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +dependencies = [ + "futures", + "libp2p-core 0.42.0", + "libp2p-identify 0.45.0", "libp2p-identity", - "libp2p-kad", + "libp2p-kad 0.46.2", "libp2p-relay", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "pin-project", "prometheus-client", + "web-time", ] [[package]] name = "libp2p-noise" -version = "0.44.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "multiaddr", "multihash", @@ -4087,15 +5019,15 @@ dependencies = [ 
[[package]] name = "libp2p-quic" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c67296ad4e092e23f92aea3d2bdb6f24eab79c0929ed816dfb460ea2f4567d2b" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-tls", "parking_lot", @@ -4111,21 +5043,21 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1c667cfabf3dd675c8e3cea63b7b98434ecf51721b7894cbb01d29983a6a9b" +checksum = "10df23d7f5b5adcc129f4a69d6fbd05209e356ccf9e8f4eb10b2692b79c77247" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "static_assertions", "thiserror", @@ -4136,24 +5068,24 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c314fe28368da5e3a262553fb0ad575c1c8934c461e10de10265551478163836" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "cbor4ii", "futures", "futures-bounded", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "rand 0.8.5", "serde", "smallvec", "tracing", "void", + "web-time", ] [[package]] @@ -4166,9 +5098,30 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom 0.2.15", "instant", - "libp2p-core", + "libp2p-core 0.41.3", + "libp2p-identity", + "lru", + "multistream-select", + 
"once_cell", + "rand 0.8.5", + "smallvec", + "tracing", + "void", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "getrandom 0.2.15", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-swarm-derive", "lru", @@ -4180,31 +5133,32 @@ dependencies = [ "tracing", "void", "wasm-bindgen-futures", + "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.34.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "libp2p-tcp" -version = "0.41.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "socket2", "tokio", @@ -4213,13 +5167,13 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b7b831e55ce2aa6c354e6861a85fdd4dd0a2b97d5e276fabac0e4810a71776" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "rcgen", "ring 0.17.8", @@ -4232,15 +5186,15 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.2.2" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", "tokio", "tracing", "void", @@ -4248,14 +5202,14 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.43.2" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b953b6803a1f3161a989538974d72511c4e48a4af355337b6fb90723c56c05" +checksum = "888b2ff2e5d8dcef97283daab35ad1043d18952b65e05279eecbe02af4c6e347" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot", "pin-project-lite", @@ -4269,14 +5223,14 @@ dependencies = [ [[package]] name = "libp2p-websocket-websys" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f95cd8a32fcf94ad1e5c2de37c2a05a5a4188d8358b005859a0fc9e63b6953bc" +checksum = "38cf9b429dd07be52cd82c4c484b1694df4209210a7db3b9ffb00c7606e230c8" dependencies = [ "bytes", "futures", "js-sys", - "libp2p-core", + "libp2p-core 0.42.0", "parking_lot", "send_wrapper 0.6.0", "thiserror", @@ -4287,13 +5241,13 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.45.2" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddd5265f6b80f94d48a3963541aad183cc598a645755d2f1805a373e41e0716b" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "thiserror", "tracing", "yamux 0.12.1", @@ -4329,7 +5283,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "scopeguard", ] @@ -4418,6 +5372,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "minicov" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +dependencies = [ + "cc", + "walkdir", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4442,19 +5406,6 @@ dependencies = [ "adler2", ] -[[package]] -name = "minreq" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763d142cdff44aaadd9268bebddb156ef6c65a0e13486bb81673cf2d8739f9b0" -dependencies = [ - "log", - "once_cell", - "rustls 0.21.12", - "rustls-webpki 0.101.7", - "webpki-roots 0.25.4", -] - [[package]] name = "mio" version = "0.8.11" @@ -4531,7 +5482,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -4554,9 +5505,9 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.2" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", @@ -4567,7 +5518,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.8.0", + "unsigned-varint 0.7.2", "url", ] @@ -4614,14 +5565,16 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.7" +version = "0.2.8" dependencies = [ "clap", "clap-verbosity-flag", "color-eyre", "futures", - "libp2p", + "libp2p 0.54.1", + "sn_build_info", "sn_networking", + "sn_protocol", "tokio", "tracing", "tracing-log 0.2.0", @@ -4729,7 +5682,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.19" +version = "0.4.0" dependencies = [ "atty", 
"better-panic", @@ -4752,13 +5705,17 @@ dependencies = [ "pretty_assertions", "prometheus-parse", "ratatui", - "reqwest 0.12.8", + "regex", + "reqwest 0.12.7", "serde", "serde_json", "signal-hook", "sn-node-manager", "sn-releases", + "sn_build_info", + "sn_evm", "sn_peers_acquisition", + "sn_protocol", "sn_service_management", "strip-ansi-escapes", "strum", @@ -4816,27 +5773,13 @@ dependencies = [ "winapi", ] -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint 0.4.6", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "num-integer", "num-traits", ] @@ -4852,16 +5795,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", - "serde", -] - [[package]] name = "num-conv" version = "0.1.0" @@ -4888,46 +5821,43 @@ dependencies = [ ] [[package]] -name = "num-iter" -version = "0.1.45" +name = "num-traits" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg 1.4.0", - "num-integer", - "num-traits", + "autocfg 1.3.0", + "libm", ] [[package]] -name = "num-rational" -version = "0.4.2" +name = "num_cpus" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "serde", + "hermit-abi 0.3.9", + "libc", ] [[package]] -name = "num-traits" -version = "0.2.19" +name = "num_enum" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "autocfg 1.4.0", - "libm", + "num_enum_derive", ] [[package]] -name = "num_cpus" -version = "1.16.0" +name = "num_enum_derive" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "hermit-abi 0.3.9", - "libc", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] @@ -4965,12 +5895,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -5133,8 +6060,8 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.8", ] @@ -5144,7 +6071,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" dependencies = [ - "group", + "group 
0.12.1", ] [[package]] @@ -5162,6 +6089,32 @@ dependencies = [ "zeroize", ] +[[package]] +name = "parity-scale-codec" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "parking" version = "2.2.1" @@ -5273,7 +6226,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5294,9 +6247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.6.0", - "serde", - "serde_derive", + "indexmap 2.5.0", ] [[package]] @@ -5316,7 +6267,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5353,9 +6304,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plist" @@ -5364,7 +6315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42cf17e9a1800f5f396bc67d193dc9411b59012a5876445ef450d449881e1016" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.5.0", "quick-xml 0.32.0", "serde", "time", 
@@ -5438,9 +6389,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -5532,6 +6483,48 @@ dependencies = [ "yansi", ] +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -5567,7 +6560,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5693,26 +6686,13 @@ dependencies = [ "byteorder", ] -[[package]] -name = "quick-protobuf-codec" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" 
-dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", - "quick-protobuf", - "thiserror", - "unsigned-varint 0.7.2", -] - [[package]] name = "quick-protobuf-codec" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", @@ -5866,6 +6846,7 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", + "serde", ] [[package]] @@ -6039,7 +7020,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -6085,9 +7066,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -6190,9 +7171,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -6213,7 +7194,7 @@ dependencies = [ "pin-project-lite", "quinn", "rustls 0.23.13", - "rustls-pemfile 2.2.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "serde", "serde_json", @@ -6246,11 +7227,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "rgb" version = "0.8.50" @@ -6290,6 +7281,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rmp" version = "0.8.14" @@ -6324,17 +7325,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "rpassword" -version = "7.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" -dependencies = [ - "libc", - "rtoolbox", - "windows-sys 0.48.0", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -6351,15 +7341,35 @@ dependencies = [ ] [[package]] -name = "rtoolbox" -version = "0.0.2" +name = "ruint" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" dependencies = [ - "libc", - "windows-sys 0.48.0", + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint 0.4.6", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", ] +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust-ini" version = "0.19.0" @@ -6382,13 +7392,28 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] @@ -6423,19 +7448,7 @@ dependencies = [ "log", "ring 0.16.20", "sct 0.6.1", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" -dependencies = [ - "log", - "ring 0.16.20", - "sct 0.7.1", - "webpki 0.22.4", + "webpki", ] [[package]] @@ -6464,15 +7477,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -6484,18 +7488,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.2.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -6600,7 +7605,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", + "base16ct 0.1.1", "der 0.6.1", "generic-array 0.14.7", "pkcs8 0.9.0", @@ -6609,25 +7614,28 @@ dependencies = [ ] [[package]] -name = "secp256k1" -version = "0.20.3" +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "rand 0.6.5", - "secp256k1-sys 0.4.2", - "serde", + "base16ct 0.2.0", + "der 0.7.9", + "generic-array 0.14.7", + "pkcs8 0.10.2", + "subtle", + "zeroize", ] [[package]] name = "secp256k1" -version = "0.28.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" dependencies = [ - "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "secp256k1-sys 0.9.2", + "rand 0.6.5", + "secp256k1-sys", + "serde", ] [[package]] @@ -6639,15 +7647,6 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -6663,7 +7662,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f9439a0cb3efb35e080a1576e3e00a804caab04010adc802aed88cf539b103ed" dependencies = [ - "aes 0.8.4", + "aes", "bincode", "brotli", "bytes", @@ -6683,6 +7682,15 @@ dependencies = [ "xor_name", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -6692,6 +7700,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -6713,6 +7730,17 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-wasm-bindgen" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + [[package]] name = "serde_bytes" version = "0.11.15" @@ -6730,7 +7758,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -6747,9 +7775,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -6764,15 +7792,45 @@ dependencies = [ ] [[package]] -name = "serde_urlencoded" -version = "0.7.1" +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex 0.4.3", + "indexmap 1.9.3", + "indexmap 2.5.0", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", + "darling", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] @@ -6781,7 +7839,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -6866,6 +7924,26 @@ dependencies = [ "opaque-debug 0.3.1", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -6875,12 +7953,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shell-words" -version = "1.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - [[package]] name = "shlex" version = "1.3.0" @@ -6934,6 +8006,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ + "digest 0.10.7", "rand_core 0.6.4", ] @@ -6943,7 +8016,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", ] [[package]] @@ -6954,7 +8027,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.6" +version = "0.11.0" dependencies = [ "assert_cmd", "assert_fs", @@ -6966,19 +8039,21 @@ dependencies = [ "colored", "dirs-next", "indicatif", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "mockall 0.12.1", "nix 0.27.1", "predicates 3.1.2", "prost 0.9.0", "rand 0.8.5", - "reqwest 0.12.8", - "semver", + "reqwest 0.12.7", + "semver 1.0.23", "serde", "serde_json", "service-manager", "sn-releases", + "sn_build_info", + "sn_evm", "sn_logging", "sn_peers_acquisition", "sn_protocol", @@ -7005,8 +8080,8 @@ dependencies = [ "flate2", "lazy_static", "regex", - "reqwest 0.12.8", - "semver", + "reqwest 0.12.7", + "semver 1.0.23", "serde_json", "tar", "thiserror", @@ -7014,28 +8089,6 @@ dependencies = [ "zip", ] -[[package]] -name = "sn_auditor" -version = "0.3.5" -dependencies = [ - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "futures", - "graphviz-rust", - "lazy_static", - "serde", - "serde_json", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "tiny_http", - "tokio", - "tracing", - "urlencoding", -] - [[package]] name = "sn_bls_ckd" version = "0.2.1" @@ -7050,102 +8103,11 @@ dependencies = [ [[package]] name = "sn_build_info" -version = 
"0.1.15" -dependencies = [ - "vergen", -] - -[[package]] -name = "sn_cli" -version = "0.95.3" +version = "0.1.16" dependencies = [ - "aes 0.7.5", - "base64 0.22.1", - "bitcoin", - "block-modes", - "blsttc", - "bytes", "chrono", - "clap", - "color-eyre", - "criterion", - "custom_debug", - "dialoguer", - "dirs-next", - "eyre", - "futures", - "hex 0.4.3", - "indicatif", - "libp2p", - "rand 0.8.5", - "rayon", - "reqwest 0.12.8", - "rmp-serde", - "rpassword", - "serde", - "sn_build_info", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "tempfile", - "tiny-keccak", - "tokio", - "tracing", - "url", - "walkdir", - "xor_name", -] - -[[package]] -name = "sn_client" -version = "0.110.4" -dependencies = [ - "assert_matches", - "async-trait", - "backoff", - "bip39", - "blsttc", - "bytes", - "console_error_panic_hook", - "crdts", - "custom_debug", - "dashmap", - "dirs-next", - "eyre", - "futures", - "getrandom 0.2.15", - "hex 0.4.3", - "itertools 0.12.1", - "libp2p", - "libp2p-identity", - "petgraph", - "prometheus-client", - "rand 0.8.5", - "rayon", - "rmp-serde", - "self_encryption", - "serde", - "sn_bls_ckd", - "sn_client", - "sn_curv", - "sn_logging", - "sn_networking", - "sn_peers_acquisition", - "sn_protocol", - "sn_registers", - "sn_transfers", - "tempfile", - "thiserror", - "tiny-keccak", - "tokio", "tracing", - "tracing-wasm", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasmtimer", - "web-sys", - "xor_name", + "vergen", ] [[package]] @@ -7169,53 +8131,44 @@ dependencies = [ "pairing-plus", "rand 0.6.5", "rand 0.7.3", - "secp256k1 0.20.3", + "secp256k1", "serde", "serde_bytes", "serde_derive", "sha2 0.8.2", "sha2 0.9.9", - "sha3", + "sha3 0.9.1", "thiserror", "typenum", "zeroize", ] [[package]] -name = "sn_faucet" -version = "0.5.3" +name = "sn_evm" +version = "0.1.1" dependencies = [ - "assert_fs", - "base64 0.22.1", - "bitcoin", - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "fs2", - "futures", + "custom_debug", + "evmlib", "hex 
0.4.3", - "indicatif", - "minreq", - "reqwest 0.12.8", + "lazy_static", + "libp2p 0.53.2", + "rand 0.8.5", + "ring 0.17.8", + "rmp-serde", "serde", "serde_json", - "sn_build_info", - "sn_cli", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "sn_transfers", + "tempfile", + "thiserror", + "tiny-keccak", "tokio", "tracing", - "url", - "warp", + "wasmtimer", + "xor_name", ] [[package]] name = "sn_logging" -version = "0.2.36" +version = "0.2.37" dependencies = [ "chrono", "color-eyre", @@ -7240,7 +8193,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.16" +version = "0.1.17" dependencies = [ "clap", "color-eyre", @@ -7254,7 +8207,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.18.4" +version = "0.19.0" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7269,15 +8222,17 @@ dependencies = [ "hyper 0.14.30", "itertools 0.12.1", "lazy_static", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "prometheus-client", "quickcheck", "rand 0.8.5", "rayon", "rmp-serde", + "self_encryption", "serde", "sn_build_info", + "sn_evm", "sn_protocol", "sn_registers", "sn_transfers", @@ -7297,36 +8252,38 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.111.4" +version = "0.112.0" dependencies = [ "assert_fs", - "assert_matches", "async-trait", + "autonomi", "blsttc", "bytes", "chrono", "clap", "color-eyre", + "const-hex", "crdts", "custom_debug", "dirs-next", + "evmlib", "eyre", "file-rotate", "futures", "hex 0.4.3", "itertools 0.12.1", - "libp2p", + "libp2p 0.54.1", "prometheus-client", "prost 0.9.0", "rand 0.8.5", "rayon", - "reqwest 0.12.8", + "reqwest 0.12.7", "rmp-serde", "self_encryption", "serde", "serde_json", "sn_build_info", - "sn_client", + "sn_evm", "sn_logging", "sn_networking", "sn_peers_acquisition", @@ -7335,6 +8292,7 @@ dependencies = [ "sn_service_management", "sn_transfers", "strum", + "sysinfo", "tempfile", "test_utils", "thiserror", @@ -7351,7 +8309,7 @@ dependencies = [ [[package]] name 
= "sn_node_rpc_client" -version = "0.6.31" +version = "0.6.32" dependencies = [ "assert_fs", "async-trait", @@ -7359,9 +8317,9 @@ dependencies = [ "clap", "color-eyre", "hex 0.4.3", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", - "sn_client", + "sn_build_info", "sn_logging", "sn_node", "sn_peers_acquisition", @@ -7378,13 +8336,13 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.3" +version = "0.5.4" dependencies = [ "clap", "lazy_static", - "libp2p", + "libp2p 0.54.1", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.7", "sn_protocol", "thiserror", "tokio", @@ -7394,7 +8352,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.11" +version = "0.17.12" dependencies = [ "blsttc", "bytes", @@ -7404,13 +8362,14 @@ dependencies = [ "dirs-next", "hex 0.4.3", "lazy_static", - "libp2p", + "libp2p 0.54.1", "prost 0.9.0", "rmp-serde", "serde", "serde_json", "sha2 0.10.8", "sn_build_info", + "sn_evm", "sn_registers", "sn_transfers", "thiserror", @@ -7423,7 +8382,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.21" +version = "0.4.0" dependencies = [ "blsttc", "crdts", @@ -7440,21 +8399,21 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.14" +version = "0.4.0" dependencies = [ "async-trait", "dirs-next", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "mockall 0.11.4", "prost 0.9.0", - "semver", + "semver 1.0.23", "serde", "serde_json", "service-manager", + "sn_evm", "sn_logging", "sn_protocol", - "sn_transfers", "sysinfo", "thiserror", "tokio", @@ -7466,7 +8425,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.19.3" +version = "0.20.0" dependencies = [ "assert_fs", "blsttc", @@ -7478,7 +8437,7 @@ dependencies = [ "fs2", "hex 0.4.3", "lazy_static", - "libp2p", + "libp2p 0.54.1", "pprof", "rand 0.8.5", "rayon", @@ -7509,7 +8468,7 @@ dependencies = [ "curve25519-dalek 4.1.3", "rand_core 0.6.4", "ring 0.17.8", - "rustc_version", + "rustc_version 0.4.1", 
"sha2 0.10.8", "subtle", ] @@ -7623,7 +8582,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -7634,9 +8593,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" dependencies = [ "debugid", "memmap2", @@ -7646,9 +8605,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7668,15 +8627,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -7700,7 +8671,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -7747,9 +8718,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" 
[[package]] name = "tar" -version = "0.4.42" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909" dependencies = [ "filetime", "libc", @@ -7758,9 +8729,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", @@ -7771,12 +8742,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ "rustix", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -7787,33 +8758,37 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.7" +version = "0.4.8" dependencies = [ + "bytes", "color-eyre", "dirs-next", - "libp2p", + "evmlib", + "libp2p 0.54.1", + "rand 0.8.5", "serde", "serde_json", + "sn_peers_acquisition", ] [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -7889,17 +8864,15 @@ dependencies = [ [[package]] name = "tiny_http" -version = "0.12.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" +checksum = "e0d6ef4e10d23c1efb862eecad25c5054429a71958b4eeef85eb5e7170b477ca" dependencies = [ "ascii", "chunked_transfer", - "httpdate", "log", - "rustls 0.20.9", - "rustls-pemfile 0.2.1", - "zeroize", + "time", + "url", ] [[package]] @@ -7929,7 +8902,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.54" +version = "0.1.55" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -7975,7 +8948,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -7986,7 +8959,7 @@ checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls 0.19.1", "tokio", - "webpki 0.21.4", + "webpki", ] [[package]] @@ -8019,6 +8992,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.12", ] [[package]] @@ -8083,11 +9057,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", @@ -8119,7 +9093,7 @@ dependencies = [ "tokio-rustls 0.22.0", "tokio-stream", "tokio-util 0.6.10", - "tower", + 
"tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8148,7 +9122,7 @@ dependencies = [ "prost 0.11.9", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8186,6 +9160,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -8230,7 +9218,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8350,18 +9338,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] -name = "tracing-wasm" -version = "0.2.1" +name = "tracing-web" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" +checksum = "b9e6a141feebd51f8d91ebfd785af50fca223c570b86852166caa3b141defe7c" dependencies = [ - "tracing", + "js-sys", + "tracing-core", "tracing-subscriber", "wasm-bindgen", + "web-sys", ] [[package]] @@ -8377,7 +9367,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3e785f863a3af4c800a2a669d0b64c879b538738e352607e2624d03f868dc01" dependencies = [ "crossterm 0.27.0", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -8407,9 +9397,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" @@ -8440,9 +9430,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-bom" @@ -8479,7 +9469,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -8489,10 +9479,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] -name = "unicode-width" -version = "0.2.0" +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -8515,10 +9505,6 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" -dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", -] [[package]] name = "unsigned-varint" @@ -8730,7 +9716,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "wasm-bindgen-shared", ] @@ -8764,7 +9750,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8775,16 +9761,43 @@ version = 
"0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +[[package]] +name = "wasm-bindgen-test" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "minicov", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "wasmtimer" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f656cd8858a5164932d8a90f936700860976ec21eb00e0fe2aa8cab13f6b4cf" +checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" dependencies = [ "futures", "js-sys", "parking_lot", "pin-utils", + "serde", "slab", "wasm-bindgen", ] @@ -8819,16 +9832,6 @@ dependencies = [ "untrusted 0.7.1", ] -[[package]] -name = "webpki" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "webpki-roots" version = "0.25.4" @@ -9123,9 +10126,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] @@ 
-9297,7 +10300,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -9317,7 +10320,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -9326,7 +10329,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ - "aes 0.8.4", + "aes", "byteorder", "bzip2", "constant_time_eq", diff --git a/Cargo.toml b/Cargo.toml index 4cdf8b3458..779485a2c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,11 +2,11 @@ resolver = "2" members = [ "autonomi", - "sn_auditor", + "autonomi-cli", + "evmlib", + "evm_testnet", "sn_build_info", - "sn_cli", - "sn_client", - "sn_faucet", + "sn_evm", "sn_logging", "sn_metrics", "nat-detection", @@ -29,7 +29,7 @@ arithmetic_overflow = "forbid" mutable_transmutes = "forbid" no_mangle_const_items = "forbid" unknown_crate_types = "forbid" -unsafe_code = "forbid" +unsafe_code = "warn" trivial_casts = "warn" trivial_numeric_casts = "warn" unused_extern_crates = "warn" diff --git a/Justfile b/Justfile index 9452125df5..a6f6f90118 100644 --- a/Justfile +++ b/Justfile @@ -1,75 +1,11 @@ #!/usr/bin/env just --justfile -release_repo := "maidsafe/safe_network" - -droplet-testbed: - #!/usr/bin/env bash - - DROPLET_NAME="node-manager-testbed" - REGION="lon1" - SIZE="s-1vcpu-1gb" - IMAGE="ubuntu-20-04-x64" - SSH_KEY_ID="30878672" - - droplet_ip=$(doctl compute droplet list \ - --format Name,PublicIPv4 --no-header | grep "^$DROPLET_NAME " | awk '{ print $2 }') - - if [ -z "$droplet_ip" ]; then - droplet_id=$(doctl compute droplet create $DROPLET_NAME \ - --region $REGION \ - --size $SIZE \ - --image $IMAGE \ - --ssh-keys $SSH_KEY_ID \ - --format ID \ - --no-header \ - --wait) - if [ -z "$droplet_id" ]; then 
- echo "Failed to obtain droplet ID" - exit 1 - fi - - echo "Droplet ID: $droplet_id" - echo "Waiting for droplet IP address..." - droplet_ip=$(doctl compute droplet get $droplet_id --format PublicIPv4 --no-header) - while [ -z "$droplet_ip" ]; do - echo "Still waiting to obtain droplet IP address..." - sleep 5 - droplet_ip=$(doctl compute droplet get $droplet_id --format PublicIPv4 --no-header) - done - fi - echo "Droplet IP address: $droplet_ip" - - nc -zw1 $droplet_ip 22 - exit_code=$? - while [ $exit_code -ne 0 ]; do - echo "Waiting on SSH to become available..." - sleep 5 - nc -zw1 $droplet_ip 22 - exit_code=$? - done - - cargo build --release --target x86_64-unknown-linux-musl - scp -r ./target/x86_64-unknown-linux-musl/release/safenode-manager \ - root@$droplet_ip:/root/safenode-manager - -kill-testbed: - #!/usr/bin/env bash - - DROPLET_NAME="node-manager-testbed" - - droplet_id=$(doctl compute droplet list \ - --format Name,ID --no-header | grep "^$DROPLET_NAME " | awk '{ print $2 }') - - if [ -z "$droplet_ip" ]; then - echo "Deleting droplet with ID $droplet_id" - doctl compute droplet delete $droplet_id - fi - -build-release-artifacts arch: +build-release-artifacts arch nightly="false": #!/usr/bin/env bash set -e arch="{{arch}}" + nightly="{{nightly}}" supported_archs=( "x86_64-pc-windows-msvc" "x86_64-apple-darwin" @@ -122,28 +58,29 @@ build-release-artifacts arch: cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" --env \"FOUNDATION_PK=$FOUNDATION_PK\" --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\" --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\"" export CROSS_CONTAINER_OPTS=$cross_container_opts + nightly_feature="" + if [[ "$nightly" == "true" ]]; then + nightly_feature="--features nightly" + fi + if [[ $arch == arm* || $arch == armv7* || $arch == aarch64* ]]; then echo "Passing to cross CROSS_CONTAINER_OPTS=$CROSS_CONTAINER_OPTS" cargo binstall --no-confirm cross - cross build --release --target $arch --bin 
faucet --features=distribution - cross build --release --target $arch --bin nat-detection - cross build --release --target $arch --bin node-launchpad - cross build --release --features="network-contacts,distribution" --target $arch --bin safe - cross build --release --features=network-contacts --target $arch --bin safenode - cross build --release --target $arch --bin safenode-manager - cross build --release --target $arch --bin safenodemand - cross build --release --target $arch --bin safenode_rpc_client - cross build --release --target $arch --bin sn_auditor + cross build --release --target $arch --bin nat-detection $nightly_feature + cross build --release --target $arch --bin node-launchpad $nightly_feature + cross build --release --features=network-contacts --target $arch --bin autonomi $nightly_feature + cross build --release --features=network-contacts,websockets --target $arch --bin safenode $nightly_feature + cross build --release --target $arch --bin safenode-manager $nightly_feature + cross build --release --target $arch --bin safenodemand $nightly_feature + cross build --release --target $arch --bin safenode_rpc_client $nightly_feature else - cargo build --release --target $arch --bin faucet --features=distribution - cargo build --release --target $arch --bin nat-detection - cargo build --release --target $arch --bin node-launchpad - cargo build --release --features="network-contacts,distribution" --target $arch --bin safe - cargo build --release --features=network-contacts --target $arch --bin safenode - cargo build --release --target $arch --bin safenode-manager - cargo build --release --target $arch --bin safenodemand - cargo build --release --target $arch --bin safenode_rpc_client - cargo build --release --target $arch --bin sn_auditor + cargo build --release --target $arch --bin nat-detection $nightly_feature + cargo build --release --target $arch --bin node-launchpad $nightly_feature + cargo build --release --features=network-contacts --target $arch 
--bin autonomi $nightly_feature + cargo build --release --features=network-contacts,websockets --target $arch --bin safenode $nightly_feature + cargo build --release --target $arch --bin safenode-manager $nightly_feature + cargo build --release --target $arch --bin safenodemand $nightly_feature + cargo build --release --target $arch --bin safenode_rpc_client $nightly_feature fi find target/$arch/release -maxdepth 1 -type f -exec cp '{}' artifacts \; @@ -176,15 +113,13 @@ make-artifacts-directory: package-all-bins: #!/usr/bin/env bash set -e - just package-bin "faucet" just package-bin "nat-detection" just package-bin "node-launchpad" - just package-bin "safe" + just package-bin "autonomi" just package-bin "safenode" - just package-bin "safenode_rpc_client" just package-bin "safenode-manager" just package-bin "safenodemand" - just package-bin "sn_auditor" + just package-bin "safenode_rpc_client" package-bin bin version="": #!/usr/bin/env bash @@ -203,32 +138,27 @@ package-bin bin version="": bin="{{bin}}" supported_bins=(\ - "faucet" \ "nat-detection" \ "node-launchpad" \ - "safe" \ + "autonomi" \ "safenode" \ "safenode-manager" \ "safenodemand" \ - "safenode_rpc_client" \ - "sn_auditor") + "safenode_rpc_client") crate_dir_name="" # In the case of the node manager, the actual name of the crate is `sn-node-manager`, but the # directory it's in is `sn_node_manager`. 
bin="{{bin}}" case "$bin" in - faucet) - crate_dir_name="sn_faucet" - ;; nat-detection) crate_dir_name="nat-detection" ;; node-launchpad) crate_dir_name="node-launchpad" ;; - safe) - crate_dir_name="sn_cli" + autonomi) + crate_dir_name="autonomi-cli" ;; safenode) crate_dir_name="sn_node" @@ -242,9 +172,6 @@ package-bin bin version="": safenode_rpc_client) crate_dir_name="sn_node_rpc_client" ;; - sn_auditor) - crate_dir_name="sn_auditor" - ;; *) echo "The $bin binary is not supported" exit 1 @@ -281,15 +208,13 @@ upload-all-packaged-bins-to-s3: set -e binaries=( - faucet nat-detection node-launchpad - safe + autonomi safenode safenode-manager safenode_rpc_client safenodemand - sn_auditor ) for binary in "${binaries[@]}"; do just upload-packaged-bin-to-s3 "$binary" @@ -300,17 +225,14 @@ upload-packaged-bin-to-s3 bin_name: set -e case "{{bin_name}}" in - faucet) - bucket="sn-faucet" - ;; nat-detection) bucket="nat-detection" ;; node-launchpad) bucket="node-launchpad" ;; - safe) - bucket="sn-cli" + autonomi) + bucket="autonomi-cli" ;; safenode) bucket="sn-node" @@ -324,9 +246,6 @@ upload-packaged-bin-to-s3 bin_name: safenode_rpc_client) bucket="sn-node-rpc-client" ;; - sn_auditor) - bucket="sn-auditor" - ;; *) echo "The {{bin_name}} binary is not supported" exit 1 @@ -351,6 +270,59 @@ upload-packaged-bin-to-s3 bin_name: fi done +delete-s3-bin bin_name version: + #!/usr/bin/env bash + set -e + + case "{{bin_name}}" in + nat-detection) + bucket="nat-detection" + ;; + node-launchpad) + bucket="node-launchpad" + ;; + autonomi) + bucket="autonomi-cli" + ;; + safenode) + bucket="sn-node" + ;; + safenode-manager) + bucket="sn-node-manager" + ;; + safenodemand) + bucket="sn-node-manager" + ;; + safenode_rpc_client) + bucket="sn-node-rpc-client" + ;; + *) + echo "The {{bin_name}} binary is not supported" + exit 1 + ;; + esac + + architectures=( + "x86_64-pc-windows-msvc" + "x86_64-apple-darwin" + "aarch64-apple-darwin" + "x86_64-unknown-linux-musl" + 
"arm-unknown-linux-musleabi" + "armv7-unknown-linux-musleabihf" + "aarch64-unknown-linux-musl" + ) + + for arch in "${architectures[@]}"; do + zip_filename="{{bin_name}}-{{version}}-${arch}.zip" + tar_filename="{{bin_name}}-{{version}}-${arch}.tar.gz" + s3_zip_path="s3://$bucket/$zip_filename" + s3_tar_path="s3://$bucket/$tar_filename" + aws s3 rm "$s3_zip_path" + echo "deleted $s3_zip_path" + aws s3 rm "$s3_tar_path" + echo "deleted $s3_tar_path" + done + package-all-architectures: #!/usr/bin/env bash set -e @@ -391,15 +363,13 @@ package-arch arch: cd artifacts/$architecture/release binaries=( - faucet nat-detection node-launchpad - safe + autonomi safenode safenode-manager safenode_rpc_client safenodemand - sn_auditor ) if [[ "$architecture" == *"windows"* ]]; then @@ -412,16 +382,3 @@ package-arch arch: fi cd ../../.. - -node-man-integration-tests: - #!/usr/bin/env bash - set -e - - cargo build --release --bin safenode --bin faucet --bin safenode-manager - cargo run --release --bin safenode-manager -- local run \ - --node-path target/release/safenode \ - --faucet-path target/release/faucet - peer=$(cargo run --release --bin safenode-manager -- local status \ - --json | jq -r .nodes[-1].listen_addr[0]) - export SAFE_PEERS=$peer - cargo test --release --package sn-node-manager --test e2e -- --nocapture diff --git a/README.md b/README.md index 6addfb65e8..48751adf0e 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,27 @@ -# The Safe Network +# The Autonomi Network (previously Safe Network) -[SafenetForum.org](https://safenetforum.org/) +[Autonomi.com](https://autonomi.com/) Own your data. Share your disk space. Get paid for doing so.
-The Data on the Safe Network is Decentralised, Autonomous, and built atop of Kademlia and +The Data on the Autonomi Network is Decentralised, Autonomous, and built atop Kademlia and Libp2p.
## Table of Contents -- [For Users](#for-Users) +- [For Users](#for-users) - [For Developers](#for-developers) - [For the Technical](#for-the-technical) -- [Using a Local Network](#Using-a-local-network) +- [Using a Local Network](#using-a-local-network) - [Metrics Dashboard](#metrics-dashboard) ### For Users -- [CLI](https://github.com/maidsafe/safe_network/blob/main/sn_cli/README.md) The Command Line +- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line Interface, allowing users to interact with the network from their terminal. - [Node](https://github.com/maidsafe//safe_network/blob/main/sn_node/README.md) The backbone of the safe network. Nodes can be run on commodity hardware and provide storage space and validation of transactions to the network. +- Web App: Coming Soon! #### Building the Node from Source @@ -28,66 +29,52 @@ If you wish to build a version of `safenode` from source, some special considera if you want it to connect to the current beta network.
You should build from the `stable` branch, as follows: + ``` git checkout stable -export FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad -export GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c -export NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 -export PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f +export FOUNDATION_PK=88a82d718d16dccc839188eddc9a46cb216667c940cd46285199458c919a170a55490db09763ae216ed25e9db78c3576 +export GENESIS_PK=aa3526db2dbc43998e0b541b8455e2ce9dd4f1cad80090e671da16e3cd11cd5e3550f74c3cefd09ad253d93cacae2320 +export NETWORK_ROYALTIES_PK=8b5463a2c8142959a7b7cfd9295587812eb07ccbe13a85865503c8004eeeb6889ccace3588dcf9f7396784d9ee48f4d5 +export PAYMENT_FORWARD_PK=87d5b511a497183c945df63ab8790a4b94cfe452d00bfbdb39e41ee861384fe0de716a224da1c6fd11356de49877dfc2 cargo build --release --features=network-contacts --bin safenode ``` -For more information about the keys, please refer to the [Keys](#keys) section below. - -### For Developers - -#### Connecting to the Beta Network - -##### Keys +#### Running the Node -Various keys in the network control where initial funds are distributed and how ongoing fees and -royalties are collected. They are also used as part of the node version string, to determine whether -a connecting node is compatible. +To run a node and receive rewards, you need to specify your Ethereum address as a parameter. Rewards are paid to the specified address. 
-For a client to connect to the current beta network, these keys must be set at build time: ``` -FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad -GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c -NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 -PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f +cargo run --release --bin safenode --features=network-contacts -- --rewards-address ``` -##### Features +More options about EVM Network below. + +### For Developers + +#### Build You should also build `safe` with the `network-contacts` and `distribution` features enabled: + ``` cargo build --release --features="network-contacts,distribution" --bin safe ``` For `safenode`, only the `network-contacts` feature should be required: + ``` cargo build --release --features=network-contacts --bin safenode ``` -#### Utility Scripts +#### Main Crates -When you start a network there are a few scripts to aid with basic processes: - -- `resources/scripts/claim-genesis.sh` which will claim the genesis tokens for a wallet on a launched network (if you - have set up the foundation wallet locally by adding a `client/account_secret` and regenerating the wallet or directly - adding the `client/wallet/main_secret_key` itself). -- `resources/scripts/make-wallets.sh` which if you have a wallet with a balance will create a number of wallets with - another balance. eg `resources/scripts/make-wallets.sh 5 1` will make 5 wallets with 1 token. -- `resources/scripts/upload-random-data` will use the existing `client` to upload random data to the network. - -- [Client](https://github.com/maidsafe/safe_network/blob/main/sn_client/README.md) The client APIs - allowing use of the SafeNetwork to users and developers. 
-- [Registers](https://github.com/maidsafe/safe_network/blob/main/sn_registers/README.md) The CRDT - registers structures available on the network. +- [Autonomi API](https://github.com/maidsafe/safe_network/blob/main/autonomi/README.md) The client APIs + allowing use of the Autonomi Network to users and developers. +- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi-cli/README.md) The Command Line + Interface, allowing users to interact with the network from their terminal. +- [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the + autonomi network. Nodes can be run on commodity hardware and run the Network. - [Node Manager](https://github.com/maidsafe/safe_network/blob/main/sn_node_manager/README.md) Use to create a local network for development and testing. -- [Faucet](https://github.com/maidsafe/safe_network/blob/main/sn_faucet/README.md) The local faucet - server, used to claim genesis and request tokens from the network. - [Node RPC](https://github.com/maidsafe/safe_network/blob/main/sn_node_rpc_client/README.md) The RPC server used by the nodes to expose API calls to the outside world. @@ -99,22 +86,12 @@ The `websockets` feature is available for the `sn_networking` crate, and above, tcp over websockets. If building for `wasm32` then `websockets` are enabled by default as this is the only method -avilable to communicate with a network as things stand. (And that network must have `websockets` +available to communicate with a network as things stand. (And that network must have `websockets` enabled.) -##### Building for wasm32 - -- Install [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/) -- `cd sn_client && wasm-pack build` - -You can then pull this package into a web app eg, to use it.
+#### Building for wasm32 -eg `await safe.get_data("/ip4/127.0.0.1/tcp/59324/ws/p2p/12D3KooWG6kyBwLVHj5hYK2SqGkP4GqrCz5gfwsvPBYic4c4TeUz","9d7e115061066126482a229822e6d68737bd67d826c269762c0f64ce87af6b4c")` - -#### Browser usage - -Browser usage is highly experimental, but the wasm32 target for `sn_client` _should_ work here. -YMMV until stabilised. +WASM support for the autonomi API is currently under active development. More docs coming soon. ### For the Technical @@ -127,201 +104,73 @@ YMMV until stabilised. - [Protocol](https://github.com/maidsafe/safe_network/blob/main/sn_protocol/README.md) The protocol used by the safe network. - [Transfers](https://github.com/maidsafe/safe_network/blob/main/sn_transfers/README.md) The - transfers crate, used to send and receive tokens on the network. + transfers crate, used to send and receive tokens Native to the network. +- [Registers](https://github.com/maidsafe/safe_network/blob/main/sn_registers/README.md) The + registers crate, used for the Register CRDT data type on the network. - [Peers Acquisition](https://github.com/maidsafe/safe_network/blob/main/sn_peers_acquisition/README.md) - The peers peers acqisition crate, or: how the network layer discovers bootstrap peers. + The peers acquisition crate, or: how the network layer discovers bootstrap peers. - [Build Info](https://github.com/maidsafe/safe_network/blob/main/sn_build_info/README.md) Small helper used to get the build/commit versioning info for debug purposes. -## Using a Local Network - -We can explore the network's features by using multiple node processes to form a local network. - -The latest version of [Rust](https://www.rust-lang.org/learn/get-started) should be installed. If -you already have an installation, use `rustup update` to get the latest version. - -Run all the commands from the root of this repository. 
+### Using a Local Network -### Run the Network +We can explore the network's features by using multiple node processes to form a local network. We also need to run a +local EVM network for our nodes and client to connect to. Follow these steps to create a local network: -
- -```bash -cargo run --bin safenode-manager --features local-discovery -- local run --build -``` - -2. Verify node status:
+##### 1. Prerequisites -```bash -cargo run --bin safenode-manager --features local-discovery -- status -``` - -3. Build a tokenized wallet:
- -```bash -cargo run --bin safe --features local-discovery -- wallet get-faucet 127.0.0.1:8000 -``` +The latest version of [Rust](https://www.rust-lang.org/learn/get-started) should be installed. If you already have an installation, use `rustup update` to get the latest version. -The node manager's `run` command starts the node processes and a faucet process, the latter of -which will dispense tokens for use with the network. The `status` command should show twenty-five -running nodes. The `wallet` command retrieves some tokens, which enables file uploads. +Run all the commands from the root of this repository. -### Files +If you haven't already, install Foundry. We need to have access to Anvil, which is packaged with Foundry, to run an EVM node: https://book.getfoundry.sh/getting-started/installation -The file storage capability can be demonstrated by uploading files to the local network, then -retrieving them. +To collect rewards for you nodes, you will need an EVM address, you can create one using [metamask](https://metamask.io/). -Upload a file or a directory: +##### 2. Run a local EVM node -```bash -cargo run --bin safe --features local-discovery -- files upload +```sh +cargo run --bin evm_testnet ``` -The output will show that the upload costs some tokens. +This creates a CSV file with the EVM network params in your data directory. -Now download the files again: +##### 3. Create the test network and pass the EVM params + `--rewards-address` _is the address where you will receive your node earnings on._ ```bash -cargo run --bin safe --features local-discovery -- files download +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address ``` -### Folders - -The folders storage capability can be demonstrated by storing folders on the network, making -changes and syncing them with the stored version on the network, as well as downloading the entire -folders hierarchy onto a local directory. 
- -All the following commands act on the current directory by default, but since we are building the -CLI binary to run it, we will have to always provide the directory we want them to act as a path -argument. -When otherwise running directly an already built CLI binary, we can simply make sure we are located -at the directory we want to act on without the need of providing the path as argument. +The EVM Network parameters are loaded from the CSV file in your data directory automatically when the `local` feature flag is enabled (`--features=local`). -Initialise a directory to then be able to track changes made on it, and sync them up with the -network: +##### 4. Verify node status ```bash -cargo run --bin safe --features local-discovery -- folders init +cargo run --bin safenode-manager --features local -- status ``` -Make sure you made a backup copy of the "recovery secret" generated by the above command, or the -one you have provided when prompted. +The node manager's `run` command starts the node processes. The `status` command should show twenty-five +running nodes. -If any changes are now made to files or directories within this folder (at this point all files and -folders are considered new since it has just been initalised for tracking), before trying to push -those changes to the network, we can get a report of the changes that have been made locally: +##### 5. 
Uploading and Downloading Data -```bash -cargo run --bin safe --features local-discovery -- folders status -``` - -We can now push all local changes made to files and directories to the network, as well as pull any -changes that could have been made to the version stored on the network since last time we synced -with it: - -```bash -cargo run --bin safe --features local-discovery -- folders sync -``` +To upload a file or a directory, you need to set the `SECRET_KEY` environment variable to your EVM secret key: -Now that's all stored on the network, you can download the folders onto any other path by providing -it as the target directory to the following command (you will be prompted to enter the "recovery -secret" you obtained when initialising the directory with `init` command): +> When running a local network, you can use the `SECRET_KEY` printed by the `evm_testnet` command [step 2](#2-run-a-local-evm-node) as it has all the money. ```bash -cargo run --bin safe --features local-discovery -- folders download -``` - -### Token Transfers - -Use your local wallet to demonstrate sending tokens and receiving transfers. - -First, get your wallet address, this address can be safely shared publicly - -``` -cargo run --bin safe -- wallet address -``` - -You can also get your balance with: - +SECRET_KEY= cargo run --bin autonomi --features local -- file upload ``` -cargo run --bin safe -- wallet balance -``` - -Now to send some tokens to an address: - -``` -cargo run --bin safe --features local-discovery -- wallet send 2 [address] -``` - -This will output a transfer as a hex string, which should be sent to the recipient. -This transfer is encrypted to the recipient so only the recipient can read and redeem it. 
-To receive a transfer, simply paste it after the wallet receive command: - -``` -cargo run --bin safe --features local-discovery -- wallet receive [transfer] -``` - -#### Out of band transaction signing -When you want to transfer tokens from a cold storage or hardware wallet, you can create and sign -the transaction offline. This is done to prevent the private key from being exposed to any online -threats. -For this type of scenarios you can create a watch-only wallet (it holds only a public key) on the -online device, while using a hot-wallet (which holds the secret key) on a device that is offline. -The following steps are a simple guide for performing such an operation. +The output will print out the address at which the content was uploaded. -Steps on the online device/computer with a watch-only wallet: +Now to download the files again: -1. Create a watch-only wallet using the hex-encoded public key: - `cargo run --release --bin safe -- wowallet create ` - -2. Deposit a cash-note, owned by the public key used above when creating, into the watch-only - wallet: - `cargo run --release --bin safe -- wowallet deposit --cash-note ` - -3. Build an unsigned transaction: - `cargo run --release --bin safe -- wowallet transaction ` - -4. Copy the built unsigned Tx generated by the above command, and send it out-of-band to the - desired device where the hot-wallet can be loaded. - -Steps on the offline device/computer with the corresponding hot-wallet: - -5. If you still don't have a hot-wallet created, which owns the cash-notes used to build the - unsigned transaction, create it with the corresponding secret key: - `cargo run --release --bin safe -- wallet create --key ` - -6. Use the hot-wallet to sign the built transaction: - `cargo run --release --bin safe -- wallet sign ` - -7. Copy the signed Tx generated by the above command, and send it out-of-band back to the online - device. - -Steps on the online device/computer with the watch-only wallet: - -8. 
Broadcast the signed transaction to the network using the watch-only wallet: - `cargo run --release --bin safe -- wowallet broadcast ` - -9. Deposit the change cash-note to the watch-only wallet: - `cargo run --release --bin safe -- wowallet deposit ` - -10. Send/share the output cash-note generated by the above command at step #8 to/with the - recipient. - -### Auditing - -We can verify a spend, optionally going back to the genesis transaction: - -``` -cargo run --bin safe --features local-discovery -- wallet verify [--genesis] [spend address] -``` - -All spends from genesis can be audited: - -``` -cargo run --bin safe --features local-discovery -- wallet audit +```bash +cargo run --bin autonomi --features local -- file download ``` ### Registers @@ -332,11 +181,11 @@ their use by two users to exchange text messages in a crude chat application. In the first terminal, using the registers example, Alice creates a register: ``` -cargo run --example registers --features=local-discovery -- --user alice --reg-nickname myregister +cargo run --example registers --features=local -- --user alice --reg-nickname myregister ``` Alice can now write a message to the register and see anything written by anyone else. For example -she might enter the text "hello, who's there?" which is written to the register and then shown as +she might enter the text "Hello, who's there?" which is written to the register and then shown as the "Latest value", in her terminal: ``` @@ -350,15 +199,15 @@ Latest value (more than one if concurrent writes were made): -------------- Enter a blank line to receive updates, or some text to be written. -hello, who's there? -Writing msg (offline) to Register: 'hello, who's there?' +Hello, who's there? +Writing msg (offline) to Register: 'Hello, who's there?' Syncing with SAFE in 2s... synced! Current total number of items in Register: 1 Latest value (more than one if concurrent writes were made): -------------- -[alice]: hello, who's there? 
+[Alice]: Hello, who's there? -------------- Enter a blank line to receive updates, or some text to be written. @@ -367,7 +216,7 @@ Enter a blank line to receive updates, or some text to be written. For anyone else to write to the same register they need to know its xor address, so to communicate with her friend Bob, Alice needs to find a way to send it to Bob. In her terminal, this is the -value starting "50f4..." in the output above. This value it will be different each time you run the +value starting "50f4..." in the output above. This value will be different each time you run the example to create a register. Having received the xor address, in another terminal Bob can access the same register to see the @@ -375,7 +224,7 @@ message Alice has written, and he can write back by running this command with th from Alice. (Note that the command should all be on one line): ``` -cargo run --example registers --features=local-discovery -- --user bob --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d +cargo run --example registers --features=local -- --user bob --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d ``` After retrieving the register and displaying the message from Alice, Bob can reply and at any time, @@ -387,7 +236,7 @@ Here's Bob writing from his terminal: ``` Latest value (more than one if concurrent writes were made): -------------- -[alice]: hello, who's there? +[Alice]: Hello, who's there? -------------- Enter a blank line to receive updates, or some text to be written. @@ -402,7 +251,7 @@ A second example, `register_inspect` allows you to view its structure and conten the above example you again provide the address of the register. 
For example: ``` -cargo run --example register_inspect --features=local-discovery -- --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d +cargo run --example register_inspect --features=local -- --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d ``` After printing a summary of the register, this example will display @@ -522,7 +371,7 @@ Listening to royalty payment events: ``` $ cargo run --bin safenode_rpc_client -- 127.0.0.1:34416 transfers -Listening to transfers notifications... (press Ctrl+C to exit) +Listening to transfer notifications... (press Ctrl+C to exit) New transfer notification received for PublicKey(0c54..5952), containing 1 cash note/s. CashNote received with UniquePubkey(PublicKey(19ee..1580)), value: 0.000000001 @@ -535,7 +384,7 @@ The `transfers` command can provide a path for royalty payment cash notes: ``` $ cargo run --release --bin=safenode_rpc_client -- 127.0.0.1:34416 transfers ./royalties-cash-notes -Listening to transfers notifications... (press Ctrl+C to exit) +Listening to transfer notifications... (press Ctrl+C to exit) Writing cash notes to: ./royalties-cash-notes ``` diff --git a/adr/libp2p/identify-interval.md b/adr/libp2p/identify-interval.md index 59dd9db4c6..1b068c1637 100644 --- a/adr/libp2p/identify-interval.md +++ b/adr/libp2p/identify-interval.md @@ -8,7 +8,7 @@ Accepted Idle nodes in a network of moderate data have a high ongoing bandwidth. -This appears to be because of the identify polling of nodes, which occurs at the deafult libp2p rate, of once per 5 minutes. +This appears to be because of the identify polling of nodes, which occurs at the default libp2p rate, of once per 5 minutes. We see ~1mb/s traffic on nodes in a moderate network. 
diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml new file mode 100644 index 0000000000..83adf193d2 --- /dev/null +++ b/autonomi-cli/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "autonomi-cli" +version = "0.1.1" +edition = "2021" + +[[bin]] +name = "autonomi" +path = "src/main.rs" + +[features] +default = ["metrics"] +local = ["sn_peers_acquisition/local", "autonomi/local"] +metrics = ["sn_logging/process-metrics"] +network-contacts = ["sn_peers_acquisition/network-contacts"] + +[[bench]] +name = "files" +harness = false + +[dependencies] +autonomi = { path = "../autonomi", version = "0.2.0", features = [ + "data", + "fs", + "registers", + "loud", +] } +clap = { version = "4.2.1", features = ["derive"] } +color-eyre = "~0.6" +dirs-next = "~2.0.0" +indicatif = { version = "0.17.5", features = ["tokio"] } +tokio = { version = "1.32.0", features = [ + "io-util", + "macros", + "parking_lot", + "rt", + "sync", + "time", + "fs", +] } +tracing = { version = "~0.1.26" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } + +[dev-dependencies] +autonomi = { path = "../autonomi", version = "0.2.0", features = [ + "data", + "fs", +] } +eyre = "0.6.8" +criterion = "0.5.1" +tempfile = "3.6.0" +rand = { version = "~0.8.5", features = ["small_rng"] } +rayon = "1.8.0" + +[lints] +workspace = true diff --git a/autonomi-cli/README.md b/autonomi-cli/README.md new file mode 100644 index 0000000000..b10d2128fb --- /dev/null +++ b/autonomi-cli/README.md @@ -0,0 +1,27 @@ +# A CLI for the Autonomi Network + +``` +Usage: autonomi_cli [OPTIONS] + +Commands: + file Operations related to file handling + register Operations related to register management + vault Operations related to vault management + help Print this message or the help of the given subcommand(s) + +Options: + --log-output-dest + Specify the logging output 
destination. [default: data-dir] + --log-format + Specify the logging format. + --peer + Peer(s) to use for bootstrap, in a 'multiaddr' format containing the peer ID [env: SAFE_PEERS=] + --timeout + The maximum duration to wait for a connection to the network before timing out + -x, --no-verify + Prevent verification of data storage on the network + -h, --help + Print help (see more with '--help') + -V, --version + Print version +``` \ No newline at end of file diff --git a/sn_cli/benches/files.rs b/autonomi-cli/benches/files.rs similarity index 66% rename from sn_cli/benches/files.rs rename to autonomi-cli/benches/files.rs index 288801d980..f545936334 100644 --- a/sn_cli/benches/files.rs +++ b/autonomi-cli/benches/files.rs @@ -10,6 +10,7 @@ use criterion::{criterion_group, criterion_main, Criterion, Throughput}; use rand::{thread_rng, Rng}; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use std::{ + collections::HashSet, fs::File, io::Write, path::{Path, PathBuf}, @@ -20,18 +21,21 @@ use tempfile::tempdir; const SAMPLE_SIZE: usize = 20; +// Default deployer wallet of the testnet. +const DEFAULT_WALLET_PRIVATE_KEY: &str = + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + // This procedure includes the client startup, which will be measured by criterion as well. // As normal user won't care much about initial client startup, // but be more alerted on communication speed during transmission. -// It will be better to execute bench test with `local-discovery`, +// It will be better to execute bench test with `local`, // to make the measurement results reflect speed improvement or regression more accurately. 
-fn safe_files_upload(dir: &str) { - let output = Command::new("./target/release/safe") - .arg("files") +fn autonomi_file_upload(dir: &str) -> String { + let autonomi_cli_path = get_cli_path(); + let output = Command::new(autonomi_cli_path) + .arg("file") .arg("upload") .arg(dir) - .arg("--retry-strategy") // no retries - .arg("quick") .output() .expect("Failed to execute command"); @@ -39,20 +43,36 @@ fn safe_files_upload(dir: &str) { let err = output.stderr; let err_string = String::from_utf8(err).expect("Failed to parse error string"); panic!("Upload command executed with failing error code: {err_string:?}"); + } else { + let out = output.stdout; + let out_string = String::from_utf8(out).expect("Failed to parse output string"); + let address = out_string + .lines() + .find(|line| line.starts_with("At address:")) + .expect("Failed to find the address of the uploaded file"); + let address = address.trim_start_matches("At address: "); + address.to_string() + } } -fn safe_files_download() { - let output = Command::new("./target/release/safe") - .arg("files") - .arg("download") - .output() - .expect("Failed to execute command"); - - if !output.status.success() { - let err = output.stderr; - let err_string = String::from_utf8(err).expect("Failed to parse error string"); - panic!("Download command executed with failing error code: {err_string:?}"); +fn autonomi_file_download(uploaded_files: HashSet<String>) { + let autonomi_cli_path = get_cli_path(); + + let temp_dir = tempdir().expect("Failed to create temp dir"); + for address in uploaded_files.iter() { + let output = Command::new(autonomi_cli_path.clone()) + .arg("file") + .arg("download") + .arg(address) + .arg(temp_dir.path()) + .output() + .expect("Failed to execute command"); + + if !output.status.success() { + let err = output.stderr; + let err_string = String::from_utf8(err).expect("Failed to parse error string"); + panic!("Download command executed with failing error code: {err_string:?}"); + } } } @@ -71,23 +91,32 @@
fn generate_file(path: &PathBuf, file_size_mb: usize) { assert_eq!(file_size_mb as f64, size); } -fn fund_cli_wallet() { - let _ = Command::new("./target/release/safe") - .arg("wallet") - .arg("get-faucet") - .arg("127.0.0.1:8000") - .output() - .expect("Failed to execute 'safe wallet get-faucet' command"); +fn get_cli_path() -> PathBuf { + let mut path = PathBuf::new(); + if let Ok(val) = std::env::var("CARGO_TARGET_DIR") { + path.push(val); + } else { + path.push("target"); + } + path.push("release"); + path.push("autonomi"); + path } fn criterion_benchmark(c: &mut Criterion) { // Check if the binary exists - if !Path::new("./target/release/safe").exists() { - eprintln!("Error: Binary ./target/release/safe does not exist. Please make sure to compile your project first"); + let cli_path = get_cli_path(); + if !Path::new(&cli_path).exists() { + eprintln!("Error: Binary {cli_path:?} does not exist. Please make sure to compile your project first"); exit(1); } + if std::env::var("SECRET_KEY").is_err() { + std::env::set_var("SECRET_KEY", DEFAULT_WALLET_PRIVATE_KEY); + } + let sizes: [u64; 2] = [1, 10]; // File sizes in MB. Add more sizes as needed + let mut uploaded_files = HashSet::new(); for size in sizes.iter() { let temp_dir = tempdir().expect("Failed to create temp dir"); @@ -102,7 +131,6 @@ fn criterion_benchmark(c: &mut Criterion) { let path = temp_dir_path.join(format!("random_file_{size}_mb_{idx}")); generate_file(&path, *size as usize); }); - fund_cli_wallet(); // Wait little bit for the fund to be settled.
std::thread::sleep(Duration::from_secs(10)); @@ -118,9 +146,12 @@ fn criterion_benchmark(c: &mut Criterion) { // Set the throughput to be reported in terms of bytes group.throughput(Throughput::Bytes(size * 1024 * 1024)); - let bench_id = format!("safe files upload {size}mb"); + let bench_id = format!("autonomi files upload {size}mb"); group.bench_function(bench_id, |b| { - b.iter(|| safe_files_upload(temp_dir_path_str)) + b.iter(|| { + let uploaded_address = autonomi_file_upload(temp_dir_path_str); + uploaded_files.insert(uploaded_address); + }) }); group.finish(); } @@ -146,8 +177,10 @@ fn criterion_benchmark(c: &mut Criterion) { // Set the throughput to be reported in terms of bytes group.throughput(Throughput::Bytes(total_size * 1024 * 1024)); - let bench_id = "safe files download".to_string(); - group.bench_function(bench_id, |b| b.iter(safe_files_download)); + let bench_id = "autonomi files download".to_string(); + group.bench_function(bench_id, |b| { + b.iter(|| autonomi_file_download(uploaded_files.clone())) + }); group.finish(); } diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs new file mode 100644 index 0000000000..af0db16c2c --- /dev/null +++ b/autonomi-cli/src/access/data_dir.rs @@ -0,0 +1,19 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use color_eyre::eyre::{eyre, Context, Result}; +use std::path::PathBuf; + +pub fn get_client_data_dir_path() -> Result<PathBuf> { + let mut home_dirs = dirs_next::data_dir() + .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; + home_dirs.push("safe"); + home_dirs.push("client"); + std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; + Ok(home_dirs) +} diff --git a/autonomi-cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs new file mode 100644 index 0000000000..18310f4831 --- /dev/null +++ b/autonomi-cli/src/access/keys.rs @@ -0,0 +1,102 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software.
+ +use autonomi::client::registers::RegisterSecretKey; +use autonomi::{get_evm_network_from_env, Wallet}; +use color_eyre::eyre::{Context, Result}; +use color_eyre::Section; +use std::env; +use std::fs; +use std::path::PathBuf; + +const SECRET_KEY_ENV: &str = "SECRET_KEY"; +const REGISTER_SIGNING_KEY_ENV: &str = "REGISTER_SIGNING_KEY"; + +const SECRET_KEY_FILE: &str = "secret_key"; +const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key"; + +/// EVM wallet +pub fn load_evm_wallet() -> Result<Wallet> { + let secret_key = + get_secret_key().wrap_err("The secret key is required to perform this action")?; + let network = get_evm_network_from_env()?; + let wallet = Wallet::new_from_private_key(network, &secret_key) + .wrap_err("Failed to load EVM wallet from key")?; + Ok(wallet) +} + +/// EVM wallet private key +pub fn get_secret_key() -> Result<String> { + // try env var first + let why_env_failed = match env::var(SECRET_KEY_ENV) { + Ok(key) => return Ok(key), + Err(e) => e, + }; + + // try from data dir + let dir = super::data_dir::get_client_data_dir_path() + .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) + .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var"))?; + + // load the key from file + let key_path = dir.join(SECRET_KEY_FILE); + fs::read_to_string(&key_path) + .wrap_err("Failed to read secret key from file") + .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var or have the key in a file at {key_path:?}")) + .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key") +} + +pub fn create_register_signing_key_file(key: RegisterSecretKey) -> Result<PathBuf> { + let dir = super::data_dir::get_client_data_dir_path() + .wrap_err("Could not access directory to write key to")?; + let file_path = dir.join(REGISTER_SIGNING_KEY_FILE); + fs::write(&file_path, key.to_hex()).wrap_err("Could not
write key to file")?; + Ok(file_path) +} + +fn parse_register_signing_key(key_hex: &str) -> Result<RegisterSecretKey> { + RegisterSecretKey::from_hex(key_hex) + .wrap_err("Failed to parse register signing key") + .with_suggestion(|| { + "the register signing key should be a hex encoded string of a bls secret key" + }) + .with_suggestion(|| { + "you can generate a new secret key with the `register generate-key` subcommand" + }) +} + +pub fn get_register_signing_key() -> Result<RegisterSecretKey> { + // try env var first + let why_env_failed = match env::var(REGISTER_SIGNING_KEY_ENV) { + Ok(key) => return parse_register_signing_key(&key), + Err(e) => e, + }; + + // try from data dir + let dir = super::data_dir::get_client_data_dir_path() + .wrap_err(format!("Failed to obtain register signing key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) + .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY_ENV} env var")) + .with_suggestion(|| "you can generate a new secret key with the `register generate-key` subcommand")?; + + // load the key from file + let key_path = dir.join(REGISTER_SIGNING_KEY_FILE); + let key_hex = fs::read_to_string(&key_path) + .wrap_err("Failed to read secret key from file") + .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY_ENV} env var or have the key in a file at {key_path:?}")) + .with_suggestion(|| "you can generate a new secret key with the `register generate-key` subcommand")?; + + // parse the key + parse_register_signing_key(&key_hex) +} + +pub fn get_register_signing_key_path() -> Result<PathBuf> { + let dir = super::data_dir::get_client_data_dir_path() + .wrap_err("Could not access directory for register signing key")?; + let file_path = dir.join(REGISTER_SIGNING_KEY_FILE); + Ok(file_path) +} diff --git a/sn_client/src/audit.rs b/autonomi-cli/src/access/mod.rs similarity index 76% rename from sn_client/src/audit.rs rename to autonomi-cli/src/access/mod.rs index
0d9bb8daec..ac80eeca88 100644 --- a/sn_client/src/audit.rs +++ b/autonomi-cli/src/access/mod.rs @@ -6,12 +6,6 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -mod dag_crawling; -mod dag_error; -mod spend_dag; - -#[cfg(test)] -mod tests; - -pub use dag_error::{DagError, SpendFault}; -pub use spend_dag::{SpendDag, SpendDagGet}; +pub mod data_dir; +pub mod keys; +pub mod network; diff --git a/autonomi-cli/src/access/network.rs b/autonomi-cli/src/access/network.rs new file mode 100644 index 0000000000..f7e455dade --- /dev/null +++ b/autonomi-cli/src/access/network.rs @@ -0,0 +1,21 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use autonomi::Multiaddr; +use color_eyre::eyre::Context; +use color_eyre::Result; +use color_eyre::Section; +use sn_peers_acquisition::PeersArgs; +use sn_peers_acquisition::SAFE_PEERS_ENV; + +pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> { + peers.get_peers().await + .wrap_err("Please provide valid Network peers to connect to") + .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var")) + .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere") +} diff --git a/autonomi-cli/src/actions/connect.rs b/autonomi-cli/src/actions/connect.rs new file mode 100644 index 0000000000..9eccb3bbfb --- /dev/null +++ b/autonomi-cli/src/actions/connect.rs @@ -0,0 +1,35 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software.
+ +use autonomi::Client; +use autonomi::Multiaddr; +use color_eyre::eyre::bail; +use color_eyre::eyre::Result; +use indicatif::ProgressBar; +use std::time::Duration; + +pub async fn connect_to_network(peers: Vec) -> Result { + let progress_bar = ProgressBar::new_spinner(); + progress_bar.enable_steady_tick(Duration::from_millis(120)); + progress_bar.set_message("Connecting to The Autonomi Network..."); + let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); + progress_bar.set_style(new_style); + + progress_bar.set_message("Connecting to The Autonomi Network..."); + + match Client::connect(&peers).await { + Ok(client) => { + progress_bar.finish_with_message("Connected to the Network"); + Ok(client) + } + Err(e) => { + progress_bar.finish_with_message("Failed to connect to the network"); + bail!("Failed to connect to the network: {e}") + } + } +} diff --git a/sn_client/src/chunks.rs b/autonomi-cli/src/actions/mod.rs similarity index 80% rename from sn_client/src/chunks.rs rename to autonomi-cli/src/actions/mod.rs index 7dbcaef92b..98ef491064 100644 --- a/sn_client/src/chunks.rs +++ b/autonomi-cli/src/actions/mod.rs @@ -6,8 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -mod error; -mod pac_man; +mod connect; +mod progress_bar; -pub(crate) use self::error::{Error, Result}; -pub(crate) use pac_man::{encrypt_large, DataMapLevel}; +pub use connect::connect_to_network; diff --git a/sn_cli/src/files.rs b/autonomi-cli/src/actions/progress_bar.rs similarity index 73% rename from sn_cli/src/files.rs rename to autonomi-cli/src/actions/progress_bar.rs index 66341f4865..5e2c6c914e 100644 --- a/sn_cli/src/files.rs +++ b/autonomi-cli/src/actions/progress_bar.rs @@ -6,22 +6,11 @@ // KIND, either express or implied. 
Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -mod chunk_manager; -mod download; -mod estimate; -mod files_uploader; -mod upload; - -pub use chunk_manager::ChunkManager; -pub use download::{download_file, download_files}; -pub use estimate::Estimator; -pub use files_uploader::{FilesUploadStatusNotifier, FilesUploadSummary, FilesUploader}; -pub use upload::{UploadedFile, UPLOADED_FILES}; - -use color_eyre::Result; +use color_eyre::eyre::Result; use indicatif::{ProgressBar, ProgressStyle}; use std::time::Duration; +#[allow(dead_code)] pub fn get_progress_bar(length: u64) -> Result { let progress_bar = ProgressBar::new(length); progress_bar.set_style( diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs new file mode 100644 index 0000000000..4c2067aa87 --- /dev/null +++ b/autonomi-cli/src/commands.rs @@ -0,0 +1,168 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +mod file; +mod register; +mod vault; +use std::path::PathBuf; + +use clap::Subcommand; +use color_eyre::Result; + +use crate::opt::Opt; + +#[derive(Subcommand, Debug)] +pub enum SubCmd { + /// Operations related to file handling. + File { + #[command(subcommand)] + command: FileCmd, + }, + + /// Operations related to register management. + Register { + #[command(subcommand)] + command: RegisterCmd, + }, + + /// Operations related to vault management. 
+ Vault { + #[command(subcommand)] + command: VaultCmd, + }, +} + +#[derive(Subcommand, Debug)] +pub enum FileCmd { + /// Estimate cost to upload a file. + Cost { + /// The file to estimate cost for. + file: String, + }, + + /// Upload a file and pay for it. + Upload { + /// The file to upload. + file: String, + }, + + /// Download a file from the given address. + Download { + /// The address of the file to download. + addr: String, + /// The destination file path. + dest_file: PathBuf, + }, + + /// List previous uploads + List, +} + +#[derive(Subcommand, Debug)] +pub enum RegisterCmd { + /// Generate a new register key. + GenerateKey { + /// Overwrite existing key if it exists + /// Warning: overwriting the existing key will result in loss of access to any existing registers created using that key + #[arg(short, long)] + overwrite: bool, + }, + + /// Estimate cost to register a name. + Cost { + /// The name to register. + name: String, + }, + + /// Create a new register with the given name and value. + Create { + /// The name of the register. + name: String, + /// The value to store in the register. + value: String, + /// Create the register with public write access. + #[arg(long, default_value = "false")] + public: bool, + }, + + /// Edit an existing register. + Edit { + /// Use the name of the register instead of the address + /// Note that only the owner of the register can use this shorthand as the address can be generated from the name and register key. + #[arg(short, long)] + name: bool, + /// The address of the register + /// With the name option on the address will be used as a name + address: String, + /// The new value to store in the register. + value: String, + }, + + /// Get the value of a register. + Get { + /// Use the name of the register instead of the address + /// Note that only the owner of the register can use this shorthand as the address can be generated from the name and register key. 
+ #[arg(short, long)] + name: bool, + /// The address of the register + /// With the name option on the address will be used as a name + address: String, + }, + + /// List previous registers + List, +} + +#[derive(Subcommand, Debug)] +pub enum VaultCmd { + /// Estimate cost to create a vault. + Cost, + + /// Create a vault at a deterministic address based on your `SECRET_KEY`. + Create, + + /// Sync vault with the network, including registers and files. + Sync, +} + +pub async fn handle_subcommand(opt: Opt) -> Result<()> { + let peers = crate::access::network::get_peers(opt.peers); + let cmd = opt.command; + + match cmd { + SubCmd::File { command } => match command { + FileCmd::Cost { file } => file::cost(&file, peers.await?).await, + FileCmd::Upload { file } => file::upload(&file, peers.await?).await, + FileCmd::Download { addr, dest_file } => { + file::download(&addr, &dest_file, peers.await?).await + } + FileCmd::List => file::list(peers.await?), + }, + SubCmd::Register { command } => match command { + RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite), + RegisterCmd::Cost { name } => register::cost(&name, peers.await?).await, + RegisterCmd::Create { + name, + value, + public, + } => register::create(&name, &value, public, peers.await?).await, + RegisterCmd::Edit { + address, + name, + value, + } => register::edit(address, name, &value, peers.await?).await, + RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await, + RegisterCmd::List => register::list(peers.await?), + }, + SubCmd::Vault { command } => match command { + VaultCmd::Cost => vault::cost(peers.await?), + VaultCmd::Create => vault::create(peers.await?), + VaultCmd::Sync => vault::sync(peers.await?), + }, + } +} diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs new file mode 100644 index 0000000000..bfa4719460 --- /dev/null +++ b/autonomi-cli/src/commands/file.rs @@ -0,0 +1,85 @@ +// Copyright 2024 MaidSafe.net 
limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::utils::collect_upload_summary; +use autonomi::client::address::addr_to_str; +use autonomi::client::address::str_to_addr; +use autonomi::Multiaddr; +use color_eyre::eyre::Context; +use color_eyre::eyre::Result; +use std::path::Path; +use std::path::PathBuf; + +pub async fn cost(file: &str, peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + + println!("Getting upload cost..."); + let cost = client + .file_cost(&PathBuf::from(file)) + .await + .wrap_err("Failed to calculate cost for file")?; + + println!("Estimate cost to upload file: {file}"); + println!("Total cost: {cost}"); + Ok(()) +} +pub async fn upload(path: &str, peers: Vec) -> Result<()> { + let wallet = crate::keys::load_evm_wallet()?; + let mut client = crate::actions::connect_to_network(peers).await?; + let event_receiver = client.enable_client_events(); + let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); + + let path = PathBuf::from(path); + + let xor_name = if path.is_dir() { + println!("Uploading directory: {path:?}"); + info!("Uploading directory: {path:?}"); + client + .dir_upload(&path, &wallet) + .await + .wrap_err("Failed to upload directory")? + } else { + println!("Uploading file: {path:?}"); + info!("Uploading file: {path:?}"); + client + .file_upload(&path, &wallet) + .await + .wrap_err("Failed to upload file")? 
+ }; + + let addr = addr_to_str(xor_name); + + println!("Successfully uploaded: {path:?}"); + println!("At address: {addr}"); + info!("Successfully uploaded: {path:?} at address: {addr}"); + if let Ok(()) = upload_completed_tx.send(()) { + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("All chunks already exist on the network"); + } else { + println!("Number of chunks uploaded: {}", summary.record_count); + println!("Total cost: {} AttoTokens", summary.tokens_spent); + } + info!("Summary for upload of data {path:?} at {addr:?}: {summary:?}"); + } + + Ok(()) +} +pub async fn download(addr: &str, dest_path: &Path, peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; + + client.download_file_or_dir(address, dest_path).await?; + + Ok(()) +} + +pub fn list(_peers: Vec) -> Result<()> { + println!("The file list feature is coming soon!"); + Ok(()) +} diff --git a/autonomi-cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs new file mode 100644 index 0000000000..d559e6cc55 --- /dev/null +++ b/autonomi-cli/src/commands/register.rs @@ -0,0 +1,175 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::utils::collect_upload_summary; +use autonomi::client::registers::RegisterAddress; +use autonomi::client::registers::RegisterPermissions; +use autonomi::client::registers::RegisterSecretKey; +use autonomi::Client; +use autonomi::Multiaddr; +use color_eyre::eyre::eyre; +use color_eyre::eyre::Context; +use color_eyre::eyre::Result; +use color_eyre::Section; + +pub fn generate_key(overwrite: bool) -> Result<()> { + // check if the key already exists + let key_path = crate::keys::get_register_signing_key_path()?; + if key_path.exists() && !overwrite { + return Err(eyre!("Register key already exists at: {}", key_path.display())) + .with_suggestion(|| "if you want to overwrite the existing key, run the command with the --overwrite flag") + .with_warning(|| "overwriting the existing key might result in loss of access to any existing registers created using that key"); + } + + // generate and write a new key to file + let key = RegisterSecretKey::random(); + let path = crate::keys::create_register_signing_key_file(key) + .wrap_err("Failed to create new register key")?; + println!("✅ Created new register key at: {}", path.display()); + Ok(()) +} + +pub async fn cost(name: &str, peers: Vec) -> Result<()> { + let register_key = crate::keys::get_register_signing_key() + .wrap_err("The register key is required to perform this action")?; + let client = crate::actions::connect_to_network(peers).await?; + + let cost = client + .register_cost(name.to_string(), register_key) + .await + .wrap_err("Failed to get cost for register")?; + println!("✅ The estimated cost to create a register with name {name} is: {cost}"); + Ok(()) +} + +pub async fn create(name: &str, value: &str, public: bool, peers: Vec) -> Result<()> { + let wallet = crate::keys::load_evm_wallet()?; + let register_key = crate::keys::get_register_signing_key() + .wrap_err("The register key is required to perform this action")?; + let mut client = crate::actions::connect_to_network(peers).await?; + let 
event_receiver = client.enable_client_events(); + let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); + + println!("Creating register with name: {name}"); + let register = if public { + println!("With public write access"); + let permissions = RegisterPermissions::new_anyone_can_write(); + client + .register_create_with_permissions( + value.as_bytes().to_vec().into(), + name, + register_key, + permissions, + &wallet, + ) + .await + .wrap_err("Failed to create register")? + } else { + println!("With private write access"); + client + .register_create( + value.as_bytes().to_vec().into(), + name, + register_key, + &wallet, + ) + .await + .wrap_err("Failed to create register")? + }; + + let address = register.address(); + + println!("✅ Register created at address: {address}"); + println!("With name: {name}"); + println!("And initial value: [{value}]"); + + if let Ok(()) = upload_completed_tx.send(()) { + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("The register was already created on the network. No tokens were spent."); + } else { + println!("Total cost: {} AttoTokens", summary.tokens_spent); + } + } + + Ok(()) +} + +pub async fn edit(address: String, name: bool, value: &str, peers: Vec) -> Result<()> { + let register_key = crate::keys::get_register_signing_key() + .wrap_err("The register key is required to perform this action")?; + let client = crate::actions::connect_to_network(peers).await?; + + let address = if name { + Client::register_address(&address, ®ister_key) + } else { + RegisterAddress::from_hex(&address) + .wrap_err(format!("Failed to parse register address: {address}")) + .with_suggestion(|| { + "if you want to use the name as the address, run the command with the --name flag" + })? 
+ }; + + println!("Getting register at address: {address}"); + let register = client + .register_get(address) + .await + .wrap_err(format!("Failed to get register at address: {address}"))?; + println!("Found register at address: {address}"); + + println!("Updating register with new value: {value}"); + client + .register_update(register, value.as_bytes().to_vec().into(), register_key) + .await + .wrap_err(format!("Failed to update register at address: {address}"))?; + + println!("✅ Successfully updated register"); + println!("With value: [{value}]"); + + Ok(()) +} + +pub async fn get(address: String, name: bool, peers: Vec) -> Result<()> { + let register_key = crate::keys::get_register_signing_key() + .wrap_err("The register key is required to perform this action")?; + let client = crate::actions::connect_to_network(peers).await?; + + let address = if name { + Client::register_address(&address, ®ister_key) + } else { + RegisterAddress::from_hex(&address) + .wrap_err(format!("Failed to parse register address: {address}")) + .with_suggestion(|| { + "if you want to use the name as the address, run the command with the --name flag" + })? 
+ }; + + println!("Getting register at address: {address}"); + let register = client + .register_get(address) + .await + .wrap_err(format!("Failed to get register at address: {address}"))?; + let values = register.values(); + + println!("✅ Register found at address: {address}"); + match values.as_slice() { + [one] => println!("With value: [{:?}]", String::from_utf8_lossy(one)), + _ => { + println!("With multiple concurrent values:"); + for value in values.iter() { + println!("[{:?}]", String::from_utf8_lossy(value)); + } + } + } + Ok(()) +} + +pub fn list(_peers: Vec) -> Result<()> { + println!("The register feature is coming soon!"); + Ok(()) +} diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs new file mode 100644 index 0000000000..9a8d708824 --- /dev/null +++ b/autonomi-cli/src/commands/vault.rs @@ -0,0 +1,25 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use autonomi::Multiaddr; +use color_eyre::eyre::Result; + +pub fn cost(_peers: Vec) -> Result<()> { + println!("The vault feature is coming soon!"); + Ok(()) +} + +pub fn create(_peers: Vec) -> Result<()> { + println!("The vault feature is coming soon!"); + Ok(()) +} + +pub fn sync(_peers: Vec) -> Result<()> { + println!("The vault feature is coming soon!"); + Ok(()) +} diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs new file mode 100644 index 0000000000..de4cdcf4c4 --- /dev/null +++ b/autonomi-cli/src/main.rs @@ -0,0 +1,68 @@ +// Copyright 2024 MaidSafe.net limited. 
+// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +#[macro_use] +extern crate tracing; + +mod access; +mod actions; +mod commands; +mod opt; +mod utils; + +pub use access::data_dir; +pub use access::keys; +pub use access::network; + +use clap::Parser; +use color_eyre::Result; + +use opt::Opt; +#[cfg(feature = "metrics")] +use sn_logging::metrics::init_metrics; +use sn_logging::{LogBuilder, LogFormat, ReloadHandle, WorkerGuard}; +use tracing::Level; + +#[tokio::main] +async fn main() -> Result<()> { + color_eyre::install().expect("Failed to initialise error handler"); + let opt = Opt::parse(); + let _log_guards = init_logging_and_metrics(&opt)?; + #[cfg(feature = "metrics")] + tokio::spawn(init_metrics(std::process::id())); + + // Log the full command that was run and the git version + info!("\"{}\"", std::env::args().collect::>().join(" ")); + let version = sn_build_info::git_info(); + info!("autonomi client built with git version: {version}"); + println!("autonomi client built with git version: {version}"); + + commands::handle_subcommand(opt).await?; + + Ok(()) +} + +fn init_logging_and_metrics(opt: &Opt) -> Result<(ReloadHandle, Option)> { + let logging_targets = vec![ + ("autonomi-cli".to_string(), Level::TRACE), + ("autonomi".to_string(), Level::TRACE), + ("evmlib".to_string(), Level::TRACE), + ("sn_evm".to_string(), Level::TRACE), + ("sn_networking".to_string(), Level::INFO), + ("sn_build_info".to_string(), Level::TRACE), + ("sn_logging".to_string(), Level::TRACE), + ("sn_peers_acquisition".to_string(), Level::TRACE), + 
("sn_protocol".to_string(), Level::TRACE), + ("sn_registers".to_string(), Level::TRACE), + ]; + let mut log_builder = LogBuilder::new(logging_targets); + log_builder.output_dest(opt.log_output_dest.clone()); + log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); + let guards = log_builder.initialize()?; + Ok(guards) +} diff --git a/sn_cli/src/bin/subcommands/mod.rs b/autonomi-cli/src/opt.rs similarity index 68% rename from sn_cli/src/bin/subcommands/mod.rs rename to autonomi-cli/src/opt.rs index 7a7ba11cad..8f3fb20967 100644 --- a/sn_cli/src/bin/subcommands/mod.rs +++ b/autonomi-cli/src/opt.rs @@ -6,17 +6,14 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -pub(crate) mod files; -pub(crate) mod folders; -pub(crate) mod register; -pub(crate) mod wallet; +use std::time::Duration; use clap::Parser; -use clap::Subcommand; use color_eyre::Result; use sn_logging::{LogFormat, LogOutputDest}; use sn_peers_acquisition::PeersArgs; -use std::time::Duration; + +use crate::commands::SubCmd; // Please do not remove the blank lines in these doc comments. // They are used for inserting line breaks when the help menu is rendered in the UI. @@ -33,6 +30,7 @@ pub(crate) struct Opt { /// - Linux: $HOME/.local/share/safe/client/logs /// - macOS: $HOME/Library/Application Support/safe/client/logs /// - Windows: C:\Users\\AppData\Roaming\safe\client\logs + #[allow(rustdoc::invalid_html_tags)] #[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")] pub log_output_dest: LogOutputDest, @@ -49,7 +47,7 @@ pub(crate) struct Opt { /// Available sub commands. #[clap(subcommand)] - pub cmd: SubCmd, + pub command: SubCmd, /// The maximum duration to wait for a connection to the network before timing out. 
#[clap(long = "timeout", global = true, value_parser = |t: &str| -> Result { Ok(t.parse().map(Duration::from_secs)?) })] @@ -61,25 +59,3 @@ pub(crate) struct Opt { #[clap(global = true, long = "no-verify", short = 'x')] pub no_verify: bool, } - -#[derive(Subcommand, Debug)] -pub(super) enum SubCmd { - #[clap(name = "wallet", subcommand)] - /// Commands for a hot-wallet management. - /// A hot-wallet holds the secret key, thus it can be used for signing transfers/transactions. - Wallet(wallet::hot_wallet::WalletCmds), - #[clap(name = "wowallet", subcommand)] - /// Commands for watch-only wallet management - /// A watch-only wallet holds only the public key, thus it cannot be used for signing - /// transfers/transactions, but only to query balances and broadcast offline signed transactions. - WatchOnlyWallet(wallet::wo_wallet::WatchOnlyWalletCmds), - #[clap(name = "files", subcommand)] - /// Commands for file management - Files(files::FilesCmds), - #[clap(name = "folders", subcommand)] - /// Commands for folders management - Folders(folders::FoldersCmds), - #[clap(name = "register", subcommand)] - /// Commands for register management - Register(register::RegisterCmds), -} diff --git a/autonomi-cli/src/utils.rs b/autonomi-cli/src/utils.rs new file mode 100644 index 0000000000..5f031a3c24 --- /dev/null +++ b/autonomi-cli/src/utils.rs @@ -0,0 +1,56 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use autonomi::client::{Amount, ClientEvent, UploadSummary}; + +/// Collects upload summary from the event receiver. +/// Send a signal to the returned sender to stop collecting and to return the result via the join handle. +pub fn collect_upload_summary( + mut event_receiver: tokio::sync::mpsc::Receiver, +) -> ( + tokio::task::JoinHandle, + tokio::sync::oneshot::Sender<()>, +) { + let (upload_completed_tx, mut upload_completed_rx) = tokio::sync::oneshot::channel::<()>(); + let stats_thread = tokio::spawn(async move { + let mut tokens_spent: Amount = Amount::from(0); + let mut record_count = 0; + + loop { + tokio::select! { + event = event_receiver.recv() => { + match event { + Some(ClientEvent::UploadComplete(upload_summary)) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; + } + None => break, + } + } + _ = &mut upload_completed_rx => break, + } + } + + // try to drain the event receiver in case there are any more events + while let Ok(event) = event_receiver.try_recv() { + match event { + ClientEvent::UploadComplete(upload_summary) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; + } + } + } + + UploadSummary { + tokens_spent, + record_count, + } + }); + + (stats_thread, upload_completed_tx) +} diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 3c3bef4c6d..617452db53 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,43 +3,76 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.1.2" +version = "0.2.0" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" +[lib] +crate-type = ["cdylib", "rlib"] + [features] -default = [] -full = ["data", "files", "fs", "registers", "transfers"] -data = ["transfers"] -files = ["transfers"] -fs = [] -local = ["sn_client/local-discovery"] -registers = ["transfers"] 
-transfers = [] +default = ["data"] +full = ["data", "registers", "vault"] +data = [] +vault = ["data"] +fs = ["tokio/fs", "data"] +local = ["sn_networking/local", "test_utils/local", "sn_evm/local"] +registers = ["data"] +loud = [] [dependencies] bip39 = "2.0.0" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } -libp2p = "0.53" +curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ + "num-bigint", +] } +eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } +const-hex = "1.12.0" +hex = "~0.4.3" +libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_protocol = { version = "0.17.11", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { version = "0.17.12", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } thiserror = "1.0.23" -tokio = { version = "1.35.0", features = ["sync", "fs"] } +tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } walkdir = "2.5.0" xor_name = "5.0.0" +futures = "0.3.30" +wasm-bindgen = "0.2.93" +wasm-bindgen-futures = "0.4.43" +serde-wasm-bindgen = "0.6.5" [dev-dependencies] +eyre = "0.6.5" +sha2 = "0.10.6" +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +# Do not specify the version field. Release process expects even the local dev deps to be published. +# Removing the version field is a workaround. 
+test_utils = { path = "../test_utils" } +tiny_http = "0.11" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +wasm-bindgen-test = "0.3.43" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +console_error_panic_hook = "0.1.7" +evmlib = { path = "../evmlib", version = "0.1.1", features = ["wasm-bindgen"] } +# See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available +instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } +js-sys = "0.3.70" +test_utils = { path = "../test_utils" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tracing-web = "0.1.3" [lints] workspace = true diff --git a/autonomi/README.md b/autonomi/README.md index e3b2291766..3b27c6b0f0 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -7,19 +7,114 @@ Connect to and build on the Autonomi network. ## Usage -See [docs.rs/autonomi](https://docs.rs/autonomi) for usage examples. +Add the autonomi crate to your `Cargo.toml`: + +```toml +[dependencies] +autonomi = { path = "../autonomi", version = "0.1.0" } +``` ## Running tests -Run a local network with the `local-discovery` feature: +### Using a local EVM testnet + +1. If you haven't, install Foundry, to be able to run Anvil + nodes: https://book.getfoundry.sh/getting-started/installation +2. Run a local EVM node: + +```sh +cargo run --bin evm_testnet +``` + +3. Run a local network with the `local` feature and use the local evm node. + +```sh +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-local +``` + +4. 
Then run the tests with the `local` feature and pass the EVM params again: + +```sh +$ EVM_NETWORK=local cargo test --package=autonomi --features=local +# Or with logs +$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture +``` + +### Using a live testnet or mainnet + +Using the hardcoded `Arbitrum One` option as an example, but you can also use the command flags of the steps above and +point it to a live network. + +1. Run a local network with the `local` feature: ```sh -cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-arbitrum-one ``` -Then run the tests with the `local` feature: +2. Then run the tests with the `local` feature. Make sure that the wallet of the private key you pass has enough gas and + payment tokens on the network (in this case Arbitrum One): + ```sh -$ cargo test --package=autonomi --features=local +$ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi cargo test --package=autonomi --features=local -- --nocapture +$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture +``` + +### WebAssembly + +To run a WASM test +- Install `wasm-pack` +- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you have `rustup`: `rustup target add wasm32-unknown-unknown`.) +- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, e.g. `/ip4//tcp//ws/p2p/`. + - As well as the other environment variables needed for EVM payments (e.g. `RPC_URL`). +- Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only. 
+ +Example: +```sh +SAFE_PEERS=/ip4//tcp//ws/p2p/ wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put +``` + + +## Faucet (local) + +There is no faucet server, but instead you can use the `Deployer wallet private key` printed in the EVM node output to +initialise a wallet from, which has almost infinite gas and payment tokens. Example: + +```rust +let rpc_url = "http://localhost:54370/"; +let payment_token_address = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; +let data_payments_address = "0x8464135c8F25Da09e49BC8782676a84730C318bC"; +let private_key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + +let network = Network::Custom(CustomNetwork::new( +rpc_url, +payment_token_address, +data_payments_address, +)); + +let deployer_wallet = Wallet::new_from_private_key(network, private_key).unwrap(); +let receiving_wallet = Wallet::new_with_random_wallet(network); + +// Send 10 payment tokens (atto) +let _ = deployer_wallet +.transfer_tokens(receiving_wallet.address(), Amount::from(10)) +.await; ``` + +Alternatively, you can provide the wallet address that should own all the gas and payment tokens to the EVM testnet +startup command using the `--genesis-wallet` flag: + +```sh +cargo run --bin evm_testnet -- --genesis-wallet +``` + +```shell +************************* +* Ethereum node started * +************************* +RPC URL: http://localhost:60093/ +Payment token address: 0x5FbDB2315678afecb367f032d93F642f64180aa3 +Chunk payments address: 0x8464135c8F25Da09e49BC8782676a84730C318bC +Deployer wallet private key: 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +Genesis wallet balance: (tokens: 20000000000000000000000000, gas: 9998998011366954730202) +``` \ No newline at end of file diff --git a/autonomi/WASM_docs.md b/autonomi/WASM_docs.md new file mode 100644 index 0000000000..995809b8bd --- /dev/null +++ b/autonomi/WASM_docs.md @@ -0,0 +1,170 @@ +## JavaScript Autonomi API Documentation + +Note
that this is a first version and will be subject to change. + +### **Client** + +The `Client` object allows interaction with the network to store and retrieve data. Below are the available methods for the `Client` class. + +#### **Constructor** + +```javascript +let client = await new Client([multiaddress]); +``` + +- **multiaddress** (Array of Strings): A list of network addresses for the client to connect to. + +Example: +```javascript +let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); +``` + +#### **Methods** + +##### **put(data, wallet)** + +Uploads a piece of encrypted data to the network. + +```javascript +let result = await client.put(data, wallet); +``` + +- **data** (Uint8Array): The data to be stored. +- **wallet** (Wallet): The wallet used to pay for the storage. + +Returns: +- **result** (XorName): The XOR address of the stored data. + +Example: +```javascript +let wallet = getFundedWallet(); +let data = new Uint8Array([1, 2, 3]); +let result = await client.put(data, wallet); +``` + +##### **get(data_map_addr)** + +Fetches encrypted data from the network using its XOR address. + +```javascript +let data = await client.get(data_map_addr); +``` + +- **data_map_addr** (XorName): The XOR address of the data to fetch. + +Returns: +- **data** (Uint8Array): The fetched data. + +Example: +```javascript +let data = await client.get(result); +``` + +##### **cost(data)** + +Gets the cost of storing the provided data on the network. + +```javascript +let cost = await client.cost(data); +``` + +- **data** (Uint8Array): The data whose storage cost you want to calculate. + +Returns: +- **cost** (AttoTokens): The calculated cost for storing the data. + +Example: +```javascript +let cost = await client.cost(new Uint8Array([1, 2, 3])); +``` + +--- + +### **Wallet** + +The `Wallet` object represents an Ethereum wallet used for data payments. 
+ +#### **Methods** + +##### **new_from_private_key(network, private_key)** + +Creates a new wallet using the given private key. + +```javascript +let wallet = Wallet.new_from_private_key(network, private_key); +``` + +- **network** (EvmNetwork): The network to which the wallet connects. +- **private_key** (String): The private key of the wallet. + +Returns: +- **wallet** (Wallet): The created wallet. + +Example: +```javascript +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +``` + +##### **address()** + +Gets the wallet’s address. + +```javascript +let address = wallet.address(); +``` + +Returns: +- **address** (Address): The wallet's address. + +Example: +```javascript +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +let address = wallet.address(); +``` + +--- + +### **EvmNetwork** + +The `EvmNetwork` object represents the blockchain network. + +#### **Methods** + +##### **default()** + +Connects to the default network. + +```javascript +let network = EvmNetwork.default(); +``` + +Returns: +- **network** (EvmNetwork): The default network. + +Example: +```javascript +let network = EvmNetwork.default(); +``` + +--- + +### Example Usage: + +```javascript +let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); +console.log("connected"); + +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +console.log("wallet retrieved"); + +let data = new Uint8Array([1, 2, 3]); +let result = await client.put(data, wallet); +console.log("Data stored at:", result); + +let fetchedData = await client.get(result); +console.log("Data retrieved:", fetchedData); +``` + +--- + +This documentation covers the basic usage of `Client`, `Wallet`, and `EvmNetwork` types in the JavaScript API. 
\ No newline at end of file diff --git a/autonomi/index.html b/autonomi/index.html new file mode 100644 index 0000000000..bd806016ca --- /dev/null +++ b/autonomi/index.html @@ -0,0 +1,48 @@ + + + + + + + + + + + + diff --git a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs new file mode 100644 index 0000000000..f314952f9c --- /dev/null +++ b/autonomi/src/client/address.rs @@ -0,0 +1,48 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use xor_name::XorName; + +#[derive(Debug, thiserror::Error)] +pub enum DataError { + #[error("Invalid XorName")] + InvalidXorName, + #[error("Input address is not a hex string")] + InvalidHexString, +} + +pub fn str_to_addr(addr: &str) -> Result { + let bytes = hex::decode(addr).map_err(|err| { + error!("Failed to decode hex string: {err:?}"); + DataError::InvalidHexString + })?; + let xor = XorName(bytes.try_into().map_err(|err| { + error!("Failed to convert bytes to XorName: {err:?}"); + DataError::InvalidXorName + })?); + Ok(xor) +} + +pub fn addr_to_str(addr: XorName) -> String { + hex::encode(addr) +} + +#[cfg(test)] +mod test { + use super::*; + use xor_name::XorName; + + #[test] + fn test_xorname_to_str() { + let rng = &mut rand::thread_rng(); + let xorname = XorName::random(rng); + let str = addr_to_str(xorname); + let xorname2 = str_to_addr(&str).expect("Failed to convert back to xorname"); + assert_eq!(xorname, xorname2); + } +} diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs new file mode 100644 index 
0000000000..f38ca24cbc --- /dev/null +++ b/autonomi/src/client/archive.rs @@ -0,0 +1,66 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::{collections::HashMap, path::PathBuf}; + +use super::{ + data::DataAddr, + data::{GetError, PutError}, + Client, +}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use sn_evm::EvmWallet; +use xor_name::XorName; + +/// The address of an archive on the network. Points to an [`Archive`]. +pub type ArchiveAddr = XorName; + +/// An archive of files that containing file paths, their metadata and the files data addresses +/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Archive { + pub map: HashMap, +} + +impl Archive { + /// Deserialize from bytes. + pub fn from_bytes(data: &Bytes) -> Result { + let root: Archive = rmp_serde::from_slice(&data[..])?; + + Ok(root) + } + + /// Serialize to bytes. + pub fn into_bytes(&self) -> Result { + let root_serialized = rmp_serde::to_vec(&self)?; + let root_serialized = Bytes::from(root_serialized); + + Ok(root_serialized) + } +} + +impl Client { + /// Fetch an archive from the network + pub async fn archive_get(&self, addr: ArchiveAddr) -> Result { + let data = self.data_get(addr).await?; + Ok(Archive::from_bytes(&data)?) 
+ } + + /// Upload an archive to the network + pub async fn archive_put( + &self, + archive: Archive, + wallet: &EvmWallet, + ) -> Result { + let bytes = archive + .into_bytes() + .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?; + self.data_put(bytes, wallet).await + } +} diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 3522da2251..055016f291 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -1,56 +1,63 @@ -use std::collections::{BTreeMap, HashSet}; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
-use crate::self_encryption::{encrypt, DataMapLevel}; -use crate::Client; use bytes::Bytes; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_client::{ - networking::{GetRecordCfg, NetworkError, PutRecordCfg}, - transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote}, - StoragePaymentResult, -}; +use libp2p::kad::Quorum; +use tokio::task::JoinError; + +use std::collections::HashSet; +use xor_name::XorName; + +use crate::client::{ClientEvent, UploadSummary}; +use crate::{self_encryption::encrypt, Client}; +use sn_evm::{Amount, AttoTokens}; +use sn_evm::{EvmWallet, EvmWalletError}; +use sn_networking::{GetRecordCfg, NetworkError}; use sn_protocol::{ - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, - }, + storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}, NetworkAddress, }; -use sn_transfers::Payment; -use tokio::task::{JoinError, JoinSet}; -use xor_name::XorName; -use super::transfers::SendSpendsError; +/// Raw Data Address (points to a DataMap) +pub type DataAddr = XorName; +/// Raw Chunk Address (points to a [`Chunk`]) +pub type ChunkAddr = XorName; /// Errors that can occur during the put operation. #[derive(Debug, thiserror::Error)] pub enum PutError { #[error("Failed to self-encrypt data.")] SelfEncryption(#[from] crate::self_encryption::Error), - #[error("Error serializing data.")] - Serialization, + #[error("Error getting Vault XorName data.")] + VaultXorName, #[error("A network error occurred.")] Network(#[from] NetworkError), - #[error("A wallet error occurred.")] - Wallet(#[from] sn_transfers::WalletError), #[error("Error occurred during payment.")] PayError(#[from] PayError), + #[error("Failed to serialize {0}")] + Serialization(String), + #[error("A wallet error occurred.")] + Wallet(#[from] sn_evm::EvmError), } /// Errors that can occur during the pay operation. 
#[derive(Debug, thiserror::Error)] pub enum PayError { + #[error("Could not get store quote for: {0:?} after several retries")] + CouldNotGetStoreQuote(XorName), #[error("Could not get store costs: {0:?}")] - CouldNotGetStoreCosts(sn_client::networking::NetworkError), + CouldNotGetStoreCosts(NetworkError), #[error("Could not simultaneously fetch store costs: {0:?}")] JoinError(JoinError), - #[error("Hot wallet error")] - WalletError(#[from] sn_transfers::WalletError), - #[error("Failed to send spends")] - SendSpendsError(#[from] SendSpendsError), + #[error("Wallet error: {0:?}")] + EvmWalletError(#[from] EvmWalletError), + #[error("Failed to self-encrypt data.")] + SelfEncryption(#[from] crate::self_encryption::Error), } /// Errors that can occur during the get operation. @@ -60,17 +67,19 @@ pub enum GetError { InvalidDataMap(rmp_serde::decode::Error), #[error("Failed to decrypt data.")] Decryption(crate::self_encryption::Error), + #[error("Failed to deserialize")] + Deserialization(#[from] rmp_serde::decode::Error), #[error("General networking error: {0:?}")] - Network(#[from] sn_client::networking::NetworkError), + Network(#[from] NetworkError), #[error("General protocol error: {0:?}")] - Protocol(#[from] sn_client::protocol::Error), + Protocol(#[from] sn_protocol::Error), } impl Client { - /// Fetch a piece of self-encrypted data from the network, by its data map - /// XOR address. - pub async fn get(&self, data_map_addr: XorName) -> Result { - let data_map_chunk = self.fetch_chunk(data_map_addr).await?; + /// Fetch a blob of data from the network + pub async fn data_get(&self, addr: DataAddr) -> Result { + info!("Fetching data from Data Address: {addr:?}"); + let data_map_chunk = self.chunk_get(addr).await?; let data = self .fetch_from_data_map_chunk(data_map_chunk.value()) .await?; @@ -78,247 +87,131 @@ impl Client { Ok(data) } - /// Get a raw chunk from the network. 
- pub async fn fetch_chunk(&self, addr: XorName) -> Result { - tracing::info!("Getting chunk: {addr:?}"); - let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key(); - - let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: None, - target_record: None, - expected_holders: HashSet::new(), - }; - let record = self.network.get_record_from_network(key, &get_cfg).await?; - let header = RecordHeader::from_record(&record)?; - if let RecordKind::Chunk = header.kind { - let chunk: Chunk = try_deserialize_record(&record)?; - Ok(chunk) - } else { - Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into()) - } - } + /// Upload a piece of data to the network. This data will be self-encrypted. + /// Returns the Data Address at which the data was stored. + pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result { + let now = sn_networking::target_arch::Instant::now(); + let (data_map_chunk, chunks) = encrypt(data)?; + info!( + "Uploading datamap chunk to the network at: {:?}", + data_map_chunk.address() + ); - /// Upload a piece of data to the network. This data will be self-encrypted, - /// and the data map XOR address will be returned. - pub async fn put(&mut self, data: Bytes, wallet: &mut HotWallet) -> Result { - let now = std::time::Instant::now(); - let (map, chunks) = encrypt(data)?; - tracing::debug!("Encryption took: {:.2?}", now.elapsed()); + debug!("Encryption took: {:.2?}", now.elapsed()); - let map_xor_name = *map.address().xorname(); + let map_xor_name = *data_map_chunk.address().xorname(); + let mut xor_names = vec![map_xor_name]; - let mut xor_names = vec![]; - xor_names.push(map_xor_name); for chunk in &chunks { xor_names.push(*chunk.name()); } - let StoragePaymentResult { skipped_chunks, .. 
} = - self.pay(xor_names.into_iter(), wallet).await?; - - // TODO: Upload in parallel - if !skipped_chunks.contains(map.name()) { - self.upload_chunk(map, wallet).await?; + // Pay for all chunks + data map chunk + info!("Paying for {} addresses", xor_names.len()); + let (payment_proofs, _free_chunks) = self + .pay(xor_names.into_iter(), wallet) + .await + .inspect_err(|err| error!("Error paying for data: {err:?}"))?; + + let mut record_count = 0; + + // Upload data map + if let Some(proof) = payment_proofs.get(&map_xor_name) { + debug!("Uploading data map chunk: {map_xor_name:?}"); + self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) + .await + .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; + record_count += 1; } + + // Upload the rest of the chunks + debug!("Uploading {} chunks", chunks.len()); for chunk in chunks { - if skipped_chunks.contains(chunk.name()) { - continue; + if let Some(proof) = payment_proofs.get(chunk.name()) { + let address = *chunk.address(); + self.chunk_upload_with_payment(chunk, proof.clone()) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; + record_count += 1; } - self.upload_chunk(chunk, wallet).await?; } - Ok(map_xor_name) - } - - // Fetch and decrypt all chunks in the data map. - async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { - let mut encrypted_chunks = vec![]; - for info in data_map.infos() { - let chunk = self.fetch_chunk(info.dst_hash).await?; - let chunk = EncryptedChunk { - index: info.index, - content: chunk.value, - }; - encrypted_chunks.push(chunk); - } - - let data = decrypt_full_set(data_map, &encrypted_chunks) - .map_err(|e| GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e)))?; - - Ok(data) - } - - // Unpack a wrapped data map and fetch all bytes using self-encryption. 
- async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result { - let mut data_map_level: DataMapLevel = - rmp_serde::from_slice(data_map_bytes).map_err(GetError::InvalidDataMap)?; - - loop { - let data_map = match &data_map_level { - DataMapLevel::First(map) => map, - DataMapLevel::Additional(map) => map, - }; - - let data = self.fetch_from_data_map(data_map).await?; + if let Some(channel) = self.client_event_sender.as_ref() { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .sum::(); - match &data_map_level { - DataMapLevel::First(_) => break Ok(data), - DataMapLevel::Additional(_) => { - data_map_level = - rmp_serde::from_slice(&data).map_err(GetError::InvalidDataMap)?; - continue; - } + let summary = UploadSummary { + record_count, + tokens_spent, }; - } - } - - pub(crate) async fn pay( - &mut self, - content_addrs: impl Iterator, - wallet: &mut HotWallet, - ) -> Result { - let mut tasks = JoinSet::new(); - for content_addr in content_addrs { - let network = self.network.clone(); - tasks.spawn(async move { - // TODO: retry, but where? 
- let cost = network - .get_store_costs_from_network( - NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), - vec![], - ) - .await - .map_err(PayError::CouldNotGetStoreCosts); - - tracing::debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - (content_addr, cost) - }); - } - tracing::debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(content_addr); - tracing::debug!("Skipped existing chunk {content_addr:?}"); - } else { - tracing::debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(content_addr, (cost.1, cost.2, cost.0.to_bytes())); - } - } - Ok((content_addr, Err(err))) => { - tracing::warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(PayError::JoinError(e)); - } + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + error!("Failed to send client event: {err:?}"); } } - let (storage_cost, royalty_fees) = if cost_map.is_empty() { - (NanoTokens::zero(), NanoTokens::zero()) - } else { - self.pay_for_records(&cost_map, wallet).await? 
- }; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) + Ok(map_xor_name) } - async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - wallet: &mut HotWallet, - ) -> Result<(NanoTokens, NanoTokens), PayError> { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transactions(wallet).await; - - let total_cost = wallet.local_send_storage_payment(cost_map)?; - - // send to network - tracing::trace!("Sending storage payment transfer to the network"); - let spend_attempt_result = self - .send_spends(wallet.unconfirmed_spend_requests().iter()) - .await; - - tracing::trace!("send_spends of {} chunks completed", cost_map.len(),); + /// Get a raw chunk from the network. + pub async fn chunk_get(&self, addr: ChunkAddr) -> Result { + info!("Getting chunk: {addr:?}"); - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. - if let Err(error) = spend_attempt_result { - tracing::warn!("The storage payment transfer was not successfully registered in the network: {error:?}. 
It will be retried later."); + let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key(); - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let SendSpendsError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - tracing::warn!( - "Removing double spends CashNote from wallet: {cash_note_key:?}" - ); - wallet.mark_notes_as_spent([cash_note_key]); - wallet.clear_specific_spend_request(*cash_note_key); - } - } + let get_cfg = GetRecordCfg { + get_quorum: Quorum::One, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }; - wallet.store_unconfirmed_spend_requests()?; + let record = self + .network + .get_record_from_network(key, &get_cfg) + .await + .inspect_err(|err| error!("Error fetching chunk: {err:?}"))?; + let header = RecordHeader::from_record(&record)?; - return Err(PayError::SendSpendsError(error)); + if let RecordKind::Chunk = header.kind { + let chunk: Chunk = try_deserialize_record(&record)?; + Ok(chunk) } else { - tracing::info!("Spend has completed: {:?}", spend_attempt_result); - wallet.clear_confirmed_spend_requests(); + Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into()) } - tracing::trace!("clear up spends of {} chunks completed", cost_map.len(),); - - Ok(total_cost) } - /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. - async fn upload_chunk(&self, chunk: Chunk, wallet: &mut HotWallet) -> Result<(), PutError> { - let xor_name = *chunk.name(); - let (payment, payee) = self.get_recent_payment_for_addr(&xor_name, wallet)?; + /// Get the estimated cost of storing a piece of data. 
+ pub async fn data_cost(&self, data: Bytes) -> Result { + let now = sn_networking::target_arch::Instant::now(); + let (data_map_chunk, chunks) = encrypt(data)?; - self.store_chunk(chunk, payee, payment).await?; + debug!("Encryption took: {:.2?}", now.elapsed()); - wallet.api().remove_payment_transaction(&xor_name); + let map_xor_name = *data_map_chunk.address().xorname(); + let mut content_addrs = vec![map_xor_name]; - Ok(()) - } - - /// Actually store a chunk to a peer. - async fn store_chunk( - &self, - chunk: Chunk, - payee: PeerId, - payment: Payment, - ) -> Result<(), PutError> { - tracing::debug!("Storing chunk: {chunk:?} to {payee:?}"); - - let key = chunk.network_address().to_record_key(); - - let record_kind = RecordKind::ChunkWithPayment; - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; + for chunk in &chunks { + content_addrs.push(*chunk.name()); + } - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - Ok(self.network.put_record(record, &put_cfg).await?) + info!( + "Calculating cost of storing {} chunks. 
Data map chunk at: {map_xor_name:?}", + content_addrs.len() + ); + + let cost_map = self + .get_store_quotes(content_addrs.into_iter()) + .await + .inspect_err(|err| error!("Error getting store quotes: {err:?}"))?; + let total_cost = AttoTokens::from_atto( + cost_map + .values() + .map(|quote| quote.2.cost.as_atto()) + .sum::(), + ); + Ok(total_cost) } } diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs deleted file mode 100644 index 21fd2d8d65..0000000000 --- a/autonomi/src/client/files.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::{collections::HashMap, path::PathBuf}; - -use bytes::Bytes; -use serde::{Deserialize, Serialize}; -use sn_transfers::HotWallet; -use walkdir::WalkDir; -use xor_name::XorName; - -use crate::Client; - -use super::data::{GetError, PutError}; - -/// Directory-like structure that containing file paths and their metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Root { - pub map: HashMap, -} - -/// Structure that describes a file on the network. The actual data is stored in -/// chunks, to be constructed with the address pointing to the data map. -/// -/// This is similar to ['inodes'](https://en.wikipedia.org/wiki/Inode) in Unix-like filesystems. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FilePointer { - data_map: XorName, - created_at: u64, - modified_at: u64, -} - -#[derive(Debug, thiserror::Error)] -pub enum UploadError { - #[error("Failed to recursively traverse directory")] - WalkDir(#[from] walkdir::Error), - #[error("Input/output failure")] - IoError(#[from] std::io::Error), - #[error("Failed to upload file")] - PutError(#[from] PutError), - #[error("Failed to fetch file")] - GetError(#[from] GetError), - #[error("Failed to serialize")] - Serialization(#[from] rmp_serde::encode::Error), - #[error("Failed to deserialize")] - Deserialization(#[from] rmp_serde::decode::Error), -} - -impl Client { - /// Upload a directory to the network. 
The directory is recursively walked. - #[cfg(feature = "fs")] - pub async fn upload_from_dir( - &mut self, - path: PathBuf, - wallet: &mut HotWallet, - ) -> Result<(Root, XorName), UploadError> { - let mut map = HashMap::new(); - for entry in WalkDir::new(path) { - let entry = entry?; - if !entry.file_type().is_file() { - continue; - } - let path = entry.path().to_path_buf(); - tracing::info!("Uploading file: {path:?}"); - let file = upload_from_file(self, path.clone(), wallet).await?; - map.insert(path, file); - } - - let root = Root { map }; - let root_serialized = rmp_serde::to_vec(&root)?; - - let xor_name = self.put(Bytes::from(root_serialized), wallet).await?; - - Ok((root, xor_name)) - } - - /// Fetch a directory from the network. - pub async fn fetch_root(&mut self, address: XorName) -> Result { - let data = self.get(address).await?; - let root: Root = rmp_serde::from_slice(&data[..])?; - - Ok(root) - } - - /// Fetch the file pointed to by the given pointer. - pub async fn fetch_file(&mut self, file: &FilePointer) -> Result { - let data = self.get(file.data_map).await?; - Ok(data) - } -} - -async fn upload_from_file( - client: &mut Client, - path: PathBuf, - wallet: &mut HotWallet, -) -> Result { - let data = tokio::fs::read(path).await?; - let data = Bytes::from(data); - - let addr = client.put(data, wallet).await?; - - // TODO: Set created_at and modified_at - Ok(FilePointer { - data_map: addr, - created_at: 0, - modified_at: 0, - }) -} diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs new file mode 100644 index 0000000000..674e03fc2b --- /dev/null +++ b/autonomi/src/client/fs.rs @@ -0,0 +1,195 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::client::Client; +use bytes::Bytes; +use sn_evm::EvmWallet; +use std::collections::HashMap; +use std::path::Path; + +use super::archive::{Archive, ArchiveAddr}; +use super::data::{DataAddr, GetError, PutError}; + +/// Errors that can occur during the file upload operation. +#[cfg(feature = "fs")] +#[derive(Debug, thiserror::Error)] +pub enum UploadError { + #[error("Failed to recursively traverse directory")] + WalkDir(#[from] walkdir::Error), + #[error("Input/output failure")] + IoError(#[from] std::io::Error), + #[error("Failed to upload file")] + PutError(#[from] PutError), + #[error("Failed to fetch file")] + GetError(#[from] GetError), + #[error("Failed to serialize")] + Serialization(#[from] rmp_serde::encode::Error), + #[error("Failed to deserialize")] + Deserialization(#[from] rmp_serde::decode::Error), +} + +#[cfg(feature = "fs")] +/// Errors that can occur during the download operation. 
+#[derive(Debug, thiserror::Error)] +pub enum DownloadError { + #[error("Failed to download file")] + GetError(#[from] GetError), + #[error("IO failure")] + IoError(#[from] std::io::Error), +} + +impl Client { + /// Download file from network to local file system + pub async fn file_download( + &self, + data_addr: DataAddr, + to_dest: &Path, + ) -> Result<(), DownloadError> { + let data = self.data_get(data_addr).await?; + if let Some(parent) = to_dest.parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(to_dest, data).await?; + Ok(()) + } + + /// Download directory from network to local file system + pub async fn dir_download( + &self, + archive_addr: ArchiveAddr, + to_dest: &Path, + ) -> Result<(), DownloadError> { + let archive = self.archive_get(archive_addr).await?; + for (path, addr) in archive.map { + self.file_download(addr, &to_dest.join(path)).await?; + } + Ok(()) + } + + /// Download either a file or a directory depending on the data present at the provided address. + pub async fn download_file_or_dir( + &self, + address: DataAddr, + to_dest: &Path, + ) -> Result<(), DownloadError> { + let data = self.data_get(address).await?; + + if let Ok(archive) = Archive::from_bytes(&data) { + info!("Got an Archive from bytes, unpacking directory to {to_dest:?}"); + for (path, addr) in archive.map { + let dest = to_dest.join(path); + + #[cfg(feature = "loud")] + println!("Downloading file: {addr:?} to {dest:?}"); + + debug!("Downloading archived file: {addr:?} to {dest:?}"); + self.file_download(addr, &dest).await?; + } + } else { + info!("The downloaded data is not an Archive, saving it as a file."); + #[cfg(feature = "loud")] + println!("Downloading file: {address:?} to {to_dest:?}"); + if let Some(parent) = to_dest.parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(to_dest, data).await?; + } + + Ok(()) + } + + /// Upload a directory to the network. The directory is recursively walked. 
+ /// Reads all files, splits into chunks, uploads chunks, uploads datamaps, uploads archive, returns ArchiveAddr (pointing to the archive) + pub async fn dir_upload( + &self, + dir_path: &Path, + wallet: &EvmWallet, + ) -> Result { + let mut map = HashMap::new(); + + for entry in walkdir::WalkDir::new(dir_path) { + let entry = entry?; + + if !entry.file_type().is_file() { + continue; + } + + let path = entry.path(); + tracing::info!("Uploading file: {path:?}"); + #[cfg(feature = "loud")] + println!("Uploading file: {path:?}"); + let file = self.file_upload(path, wallet).await?; + + map.insert(path.to_path_buf(), file); + } + + let archive = Archive { map }; + let archive_serialized = archive.into_bytes()?; + + let arch_addr = self.data_put(archive_serialized, wallet).await?; + + Ok(arch_addr) + } + + /// Upload a file to the network. + /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap) + pub async fn file_upload( + &self, + path: &Path, + wallet: &EvmWallet, + ) -> Result { + let data = tokio::fs::read(path).await?; + let data = Bytes::from(data); + let addr = self.data_put(data, wallet).await?; + Ok(addr) + } + + /// Get the cost to upload a file/dir to the network. 
+ /// quick and dirty implementation, please refactor once files are cleanly implemented + pub async fn file_cost(&self, path: &Path) -> Result { + let mut map = HashMap::new(); + let mut total_cost = sn_evm::Amount::ZERO; + + for entry in walkdir::WalkDir::new(path) { + let entry = entry?; + + if !entry.file_type().is_file() { + continue; + } + + let path = entry.path().to_path_buf(); + tracing::info!("Cost for file: {path:?}"); + + let data = tokio::fs::read(&path).await?; + let file_bytes = Bytes::from(data); + let file_cost = self.data_cost(file_bytes.clone()).await.expect("TODO"); + + total_cost += file_cost.as_atto(); + + // re-do encryption to get the correct map xorname here + // this code needs refactor + let now = sn_networking::target_arch::Instant::now(); + let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes).expect("TODO"); + tracing::debug!("Encryption took: {:.2?}", now.elapsed()); + let map_xor_name = *data_map_chunk.address().xorname(); + + map.insert(path, map_xor_name); + } + + let root = Archive { map }; + let root_serialized = rmp_serde::to_vec(&root).expect("TODO"); + + let archive_cost = self + .data_cost(Bytes::from(root_serialized)) + .await + .expect("TODO"); + + total_cost += archive_cost.as_atto(); + Ok(total_cost.into()) + } +} diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index d8e80620b0..68dfe0d50a 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -1,26 +1,43 @@ -use std::{collections::HashSet, time::Duration}; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. -use libp2p::{identity::Keypair, Multiaddr}; -use sn_client::networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; -use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; -use tokio::{sync::mpsc::Receiver, time::interval}; +pub mod address; #[cfg(feature = "data")] -#[cfg_attr(docsrs, doc(cfg(feature = "data")))] -mod data; -#[cfg(feature = "files")] -#[cfg_attr(docsrs, doc(cfg(feature = "files")))] -mod files; +pub mod archive; +#[cfg(feature = "data")] +pub mod data; +#[cfg(feature = "fs")] +pub mod fs; #[cfg(feature = "registers")] -#[cfg_attr(docsrs, doc(cfg(feature = "registers")))] -mod registers; -#[cfg(feature = "transfers")] -#[cfg_attr(docsrs, doc(cfg(feature = "transfers")))] -mod transfers; +pub mod registers; +#[cfg(feature = "vault")] +pub mod vault; + +#[cfg(target_arch = "wasm32")] +pub mod wasm; + +// private module with utility functions +mod utils; + +pub use sn_evm::Amount; + +use libp2p::{identity::Keypair, Multiaddr}; +use sn_networking::{interval, multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; +use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; +use std::{collections::HashSet, sync::Arc, time::Duration}; +use tokio::sync::mpsc; /// Time before considering the connection timed out. pub const CONNECT_TIMEOUT_SECS: u64 = 20; +const CLIENT_EVENT_CHANNEL_SIZE: usize = 100; + /// Represents a connection to the Autonomi network. /// /// # Example @@ -28,7 +45,7 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; /// To connect to the network, use [`Client::connect`]. 
/// /// ```no_run -/// # use autonomi::Client; +/// # use autonomi::client::Client; /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?]; @@ -39,6 +56,7 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; #[derive(Clone)] pub struct Client { pub(crate) network: Network, + pub(crate) client_event_sender: Arc>>, } /// Error returned by [`Client::connect`]. @@ -58,7 +76,7 @@ impl Client { /// This will timeout after 20 seconds. (See [`CONNECT_TIMEOUT_SECS`].) /// /// ```no_run - /// # use autonomi::Client; + /// # use autonomi::client::Client; /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?]; @@ -75,47 +93,59 @@ impl Client { // Spawn task to dial to the given peers let network_clone = network.clone(); let peers = peers.to_vec(); - let _handle = tokio::spawn(async move { + let _handle = sn_networking::target_arch::spawn(async move { for addr in peers { if let Err(err) = network_clone.dial(addr.clone()).await { + error!("Failed to dial addr={addr} with err: {err:?}"); eprintln!("addr={addr} Failed to dial: {err:?}"); }; } }); - let (sender, receiver) = tokio::sync::oneshot::channel(); - tokio::spawn(handle_event_receiver(event_receiver, sender)); + let (sender, receiver) = futures::channel::oneshot::channel(); + sn_networking::target_arch::spawn(handle_event_receiver(event_receiver, sender)); receiver.await.expect("sender should not close")?; - Ok(Self { network }) + Ok(Self { + network, + client_event_sender: Arc::new(None), + }) + } + + /// Receive events from the client. 
+ pub fn enable_client_events(&mut self) -> mpsc::Receiver { + let (client_event_sender, client_event_receiver) = + tokio::sync::mpsc::channel(CLIENT_EVENT_CHANNEL_SIZE); + self.client_event_sender = Arc::new(Some(client_event_sender)); + client_event_receiver } } -fn build_client_and_run_swarm(local: bool) -> (Network, Receiver) { - // TODO: `root_dir` is only used for nodes. `NetworkBuilder` should not require it. - let root_dir = std::env::temp_dir(); - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); +fn build_client_and_run_swarm(local: bool) -> (Network, mpsc::Receiver) { + let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local); // TODO: Re-export `Receiver` from `sn_networking`. Else users need to keep their `tokio` dependency in sync. // TODO: Think about handling the mDNS error here. let (network, event_receiver, swarm_driver) = network_builder.build_client().expect("mdns to succeed"); - let _swarm_driver = tokio::spawn(swarm_driver.run()); + let _swarm_driver = sn_networking::target_arch::spawn(swarm_driver.run()); (network, event_receiver) } async fn handle_event_receiver( - mut event_receiver: Receiver, - sender: tokio::sync::oneshot::Sender>, + mut event_receiver: mpsc::Receiver, + sender: futures::channel::oneshot::Sender>, ) { // We switch this to `None` when we've sent the oneshot 'connect' result. let mut sender = Some(sender); let mut unsupported_protocols = vec![]; let mut timeout_timer = interval(Duration::from_secs(CONNECT_TIMEOUT_SECS)); + + #[cfg(not(target_arch = "wasm32"))] timeout_timer.tick().await; loop { @@ -165,3 +195,15 @@ async fn handle_event_receiver( // TODO: Handle closing of network events sender } + +/// Events that can be broadcasted by the client. +pub enum ClientEvent { + UploadComplete(UploadSummary), +} + +/// Summary of an upload operation. 
+#[derive(Debug, Clone)] +pub struct UploadSummary { + pub record_count: usize, + pub tokens_spent: Amount, +} diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 79d302d787..fb3c55fa6c 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -1,176 +1,200 @@ -use std::collections::BTreeSet; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. -use crate::Client; +/// Register Secret Key +pub use bls::SecretKey as RegisterSecretKey; +use sn_evm::Amount; +use sn_evm::AttoTokens; +use sn_evm::EvmWalletError; +use sn_networking::VerificationKind; +use sn_protocol::storage::RetryStrategy; +pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; -use bls::SecretKey; +use crate::client::data::PayError; +use crate::client::Client; +use crate::client::ClientEvent; +use crate::client::UploadSummary; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; -use sn_client::networking::GetRecordCfg; -use sn_client::networking::NetworkError; -use sn_client::networking::PutRecordCfg; -use sn_client::registers::EntryHash; -use sn_client::registers::Permissions; -use sn_client::registers::Register as ClientRegister; -use sn_client::registers::SignedRegister; -use sn_client::transfers::HotWallet; +use sn_evm::EvmWallet; +use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg}; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; 
-use sn_protocol::storage::RegisterAddress; use sn_protocol::NetworkAddress; +use sn_registers::Register as BaseRegister; +use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister}; +use std::collections::BTreeSet; use xor_name::XorName; -use super::data::PayError; - #[derive(Debug, thiserror::Error)] pub enum RegisterError { #[error("Network error")] Network(#[from] NetworkError), #[error("Serialization error")] Serialization, + #[error("Register could not be verified (corrupt)")] + FailedVerification, #[error("Payment failure occurred during register creation.")] Pay(#[from] PayError), #[error("Failed to retrieve wallet payment")] - Wallet(#[from] sn_transfers::WalletError), + Wallet(#[from] EvmWalletError), #[error("Failed to write to low-level register")] Write(#[source] sn_registers::Error), #[error("Failed to sign register")] CouldNotSign(#[source] sn_registers::Error), + #[error("Received invalid quote from node, this node is possibly malfunctioning, try another node by trying another register name")] + InvalidQuote, } #[derive(Clone, Debug)] pub struct Register { - inner: SignedRegister, + signed_reg: SignedRegister, + crdt_reg: RegisterCrdt, } impl Register { pub fn address(&self) -> &RegisterAddress { - self.inner.address() + self.signed_reg.address() } /// Retrieve the current values of the register. There can be multiple values /// in case a register was updated concurrently. This is because of the nature /// of registers, which allows for network concurrency. pub fn values(&self) -> Vec { - self.inner - .clone() - .base_register() + self.crdt_reg .read() .into_iter() .map(|(_hash, value)| value.into()) .collect() } -} -impl Client { - /// Creates a new Register with an initial value and uploads it to the network. 
- pub async fn create_register( - &mut self, - value: Bytes, + fn new( + initial_value: Option, name: XorName, - owner: SecretKey, - wallet: &mut HotWallet, + owner: RegisterSecretKey, + permissions: RegisterPermissions, ) -> Result { let pk = owner.public_key(); - // Owner can write to the register. - let permissions = Permissions::new_with([pk]); - let mut register = ClientRegister::new(pk, name, permissions); - let address = NetworkAddress::from_register_address(*register.address()); + let base_register = BaseRegister::new(pk, name, permissions); - let entries = register - .read() - .into_iter() - .map(|(entry_hash, _value)| entry_hash) - .collect(); - register - .write(value.into(), &entries, &owner) - .map_err(RegisterError::Write)?; + let signature = owner.sign(base_register.bytes().map_err(RegisterError::Write)?); + let signed_reg = SignedRegister::new(base_register, signature, BTreeSet::new()); - let _payment_result = self - .pay(std::iter::once(register.address().xorname()), wallet) - .await?; + let crdt_reg = RegisterCrdt::new(*signed_reg.address()); - let (payment, payee) = - self.get_recent_payment_for_addr(®ister.address().xorname(), wallet)?; + let mut register = Register { + signed_reg, + crdt_reg, + }; - let signed_register = register - .clone() - .into_signed(&owner) - .map_err(RegisterError::CouldNotSign)?; + if let Some(value) = initial_value { + register.write_atop(&value, &owner)?; + } - let record = Record { - key: address.to_record_key(), - value: try_serialize_record( - &(payment, &signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? 
- .to_vec(), - publisher: None, - expires: None, - }; + Ok(register) + } - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; + fn write_atop(&mut self, entry: &[u8], owner: &RegisterSecretKey) -> Result<(), RegisterError> { + let children: BTreeSet<_> = self.crdt_reg.read().iter().map(|(hash, _)| *hash).collect(); - self.network.put_record(record, &put_cfg).await?; + let (_hash, address, crdt_op) = self + .crdt_reg + .write(entry.to_vec(), &children) + .map_err(RegisterError::Write)?; - Ok(Register { - inner: signed_register, - }) + let op = RegisterOp::new(address, crdt_op, owner); + + let _ = self.signed_reg.add_op(op); + + Ok(()) + } +} + +impl Client { + /// Generate a new register key + pub fn register_generate_key() -> RegisterSecretKey { + RegisterSecretKey::random() } /// Fetches a Register from the network. - pub async fn fetch_register( - &self, - address: RegisterAddress, - ) -> Result { + pub async fn register_get(&self, address: RegisterAddress) -> Result { + info!("Fetching register at addr: {address}"); let network_address = NetworkAddress::from_register_address(address); let key = network_address.to_record_key(); let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, + get_quorum: Quorum::Majority, retry_strategy: None, target_record: None, expected_holders: Default::default(), + is_register: true, }; - let record = self.network.get_record_from_network(key, &get_cfg).await?; + let signed_reg = match self.network.get_record_from_network(key, &get_cfg).await { + Ok(record) => { + let signed_reg: SignedRegister = + try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + signed_reg + } + // manage forked register case + Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { + debug!("Forked register detected for {address:?} merging forks"); + let mut registers: Vec = vec![]; + for (_, (record, _)) in 
result_map { + registers.push( + try_deserialize_record(&record) + .map_err(|_| RegisterError::Serialization)?, + ); + } + let register = registers.iter().fold(registers[0].clone(), |mut acc, x| { + if let Err(e) = acc.merge(x) { + warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address()); + } + acc + }); + register + } + Err(e) => { + error!("Failed to get register {address:?} from network: {e}"); + Err(e)? + } + }; + + // Make sure the fetched record contains valid CRDT operations + signed_reg + .verify() + .map_err(|_| RegisterError::FailedVerification)?; - let register: SignedRegister = - try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + let mut crdt_reg = RegisterCrdt::new(*signed_reg.address()); + for op in signed_reg.ops() { + if let Err(err) = crdt_reg.apply_op(op.clone()) { + return Err(RegisterError::Write(err)); + } + } - Ok(Register { inner: register }) + Ok(Register { + signed_reg, + crdt_reg, + }) } /// Updates a Register on the network with a new value. This will overwrite existing value(s). 
- pub async fn update_register( + pub async fn register_update( &self, - register: Register, + mut register: Register, new_value: Bytes, - owner: SecretKey, + owner: RegisterSecretKey, ) -> Result<(), RegisterError> { - // Fetch the current register - let mut signed_register = register.inner; - let mut register = signed_register.base_register().clone(); - - // Get all current branches - let children: BTreeSet = register.read().into_iter().map(|(e, _)| e).collect(); - - // Write the new value to all branches - let (_, op) = register - .write(new_value.to_vec(), &children, &owner) - .map_err(RegisterError::Write)?; + register.write_atop(&new_value, &owner)?; - // Apply the operation to the register - signed_register - .add_op(op.clone()) - .map_err(RegisterError::Write)?; + let signed_register = register.signed_reg.clone(); // Prepare the record for network storage let record = Record { @@ -182,16 +206,169 @@ impl Client { expires: None, }; + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::default()), + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; let put_cfg = PutRecordCfg { put_quorum: Quorum::All, retry_strategy: None, use_put_record_to: None, - verification: None, + verification: Some((VerificationKind::Network, get_cfg)), }; // Store the updated register on the network - self.network.put_record(record, &put_cfg).await?; + self.network + .put_record(record, &put_cfg) + .await + .inspect_err(|err| { + error!( + "Failed to put record - register {:?} to the network: {err}", + register.address() + ) + })?; Ok(()) } + + /// Get the cost to create a register + pub async fn register_cost( + &self, + name: String, + owner: RegisterSecretKey, + ) -> Result { + info!("Getting cost for register with name: {name}"); + // get register address + let pk = owner.public_key(); + let name = XorName::from_content_parts(&[name.as_bytes()]); + let permissions = Permissions::new_with([pk]); + 
let register = Register::new(None, name, owner, permissions)?; + let reg_xor = register.address().xorname(); + + // get cost to store register + // NB TODO: register should be priced differently from other data + let cost_map = self.get_store_quotes(std::iter::once(reg_xor)).await?; + let total_cost = AttoTokens::from_atto( + cost_map + .values() + .map(|quote| quote.2.cost.as_atto()) + .sum::(), + ); + + Ok(total_cost) + } + + /// Get the address of a register from its name and owner + pub fn register_address(name: &str, owner: &RegisterSecretKey) -> RegisterAddress { + let pk = owner.public_key(); + let name = XorName::from_content_parts(&[name.as_bytes()]); + RegisterAddress::new(name, pk) + } + + /// Creates a new Register with a name and an initial value and uploads it to the network. + /// + /// The Register is created with the owner as the only writer. + pub async fn register_create( + &self, + value: Bytes, + name: &str, + owner: RegisterSecretKey, + wallet: &EvmWallet, + ) -> Result { + let pk = owner.public_key(); + let permissions = Permissions::new_with([pk]); + + self.register_create_with_permissions(value, name, owner, permissions, wallet) + .await + } + + /// Creates a new Register with a name and an initial value and uploads it to the network. + /// + /// Unlike `register_create`, this function allows you to specify the permissions for the register. + pub async fn register_create_with_permissions( + &self, + value: Bytes, + name: &str, + owner: RegisterSecretKey, + permissions: RegisterPermissions, + wallet: &EvmWallet, + ) -> Result { + info!("Creating register with name: {name}"); + let name = XorName::from_content_parts(&[name.as_bytes()]); + + // Owner can write to the register. 
+ let register = Register::new(Some(value), name, owner, permissions)?; + let address = register.address(); + + let reg_xor = address.xorname(); + debug!("Paying for register at address: {address}"); + let (payment_proofs, _skipped) = self + .pay(std::iter::once(reg_xor), wallet) + .await + .inspect_err(|err| { + error!("Failed to pay for register at address: {address} : {err}") + })?; + let proof = if let Some(proof) = payment_proofs.get(&reg_xor) { + proof + } else { + // register was skipped, meaning it was already paid for + error!("Register at address: {address} was already paid for"); + return Err(RegisterError::Network(NetworkError::RegisterAlreadyExists)); + }; + + let payee = proof + .to_peer_id_payee() + .ok_or(RegisterError::InvalidQuote) + .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?; + let signed_register = register.signed_reg.clone(); + + let record = Record { + key: NetworkAddress::from_register_address(*address).to_record_key(), + value: try_serialize_record( + &(proof, &signed_register), + RecordKind::RegisterWithPayment, + ) + .map_err(|_| RegisterError::Serialization)?
+ .to_vec(), + publisher: None, + expires: None, + }; + + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::default()), + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; + let put_cfg = PutRecordCfg { + put_quorum: Quorum::All, + retry_strategy: None, + use_put_record_to: Some(vec![payee]), + verification: Some((VerificationKind::Network, get_cfg)), + }; + + debug!("Storing register at address {address} to the network"); + self.network + .put_record(record, &put_cfg) + .await + .inspect_err(|err| { + error!("Failed to put record - register {address} to the network: {err}") + })?; + + if let Some(channel) = self.client_event_sender.as_ref() { + let summary = UploadSummary { + record_count: 1, + tokens_spent: proof.quote.cost.as_atto(), + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + error!("Failed to send client event: {err}"); + } + } + + Ok(register) + } } diff --git a/autonomi/src/client/transfers.rs b/autonomi/src/client/transfers.rs deleted file mode 100644 index 9d8ba9f480..0000000000 --- a/autonomi/src/client/transfers.rs +++ /dev/null @@ -1,329 +0,0 @@ -use crate::wallet::MemWallet; -use crate::Client; -use sn_client::transfers::{MainPubkey, NanoTokens}; -use sn_transfers::{SpendReason, Transfer}; - -use sn_transfers::UniquePubkey; -use std::collections::BTreeSet; - -#[derive(Debug, thiserror::Error)] -pub enum SendSpendsError { - /// The cashnotes that were attempted to be spent have already been spent to another address - #[error("Double spend attempted with cashnotes: {0:?}")] - DoubleSpendAttemptedForCashNotes(BTreeSet), - /// A general error when a transfer fails - #[error("Failed to send tokens due to {0}")] - CouldNotSendMoney(String), -} - -#[derive(Debug, thiserror::Error)] -pub enum TransferError { - #[error("Failed to send tokens due to {0}")] - CouldNotSendMoney(String), - #[error("Wallet error: {0:?}")] - 
WalletError(#[from] crate::wallet::error::WalletError), - #[error("Network error: {0:?}")] - NetworkError(#[from] sn_client::networking::NetworkError), -} - -#[derive(Debug, thiserror::Error)] -pub enum CashNoteError { - #[error("CashNote was already spent.")] - AlreadySpent, - #[error("Failed to get spend: {0:?}")] - FailedToGetSpend(String), -} - -use libp2p::{ - futures::future::join_all, - kad::{Quorum, Record}, - PeerId, -}; -use sn_client::{ - networking::{ - GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, - }, - transfers::{HotWallet, SignedSpend}, -}; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind, RetryStrategy, SpendAddress}, - NetworkAddress, PrettyPrintRecordKey, -}; -use sn_transfers::Payment; -use xor_name::XorName; - -use crate::VERIFY_STORE; -use sn_transfers::CashNote; -use std::collections::HashSet; - -#[derive(Debug, thiserror::Error)] -pub enum SendError { - #[error("CashNote amount unexpected: {0}")] - CashNoteAmountUnexpected(String), - #[error("CashNote has no parent spends.")] - CashNoteHasNoParentSpends, - #[error("Wallet error occurred during sending of transfer.")] - WalletError(#[from] crate::wallet::error::WalletError), - #[error("Encountered transfer error during sending.")] - TransferError(#[from] sn_transfers::TransferError), - #[error("Spends error: {0:?}")] - SpendsError(#[from] SendSpendsError), -} - -#[derive(Debug, thiserror::Error)] -pub enum ReceiveError { - #[error("Could not deserialize `Transfer`.")] - TransferDeserializationFailed, - #[error("Transfer error occurred during receiving.")] - TransferError(#[from] TransferError), -} - -// Hide these from the docs. -#[doc(hidden)] -impl Client { - /// Send spend requests to the network. 
- pub async fn send_spends( - &self, - spend_requests: impl Iterator, - ) -> Result<(), SendSpendsError> { - let mut tasks = Vec::new(); - - // send spends to the network in parralel - for spend_request in spend_requests { - tracing::debug!( - "sending spend request to the network: {:?}: {spend_request:#?}", - spend_request.unique_pubkey() - ); - - let the_task = async move { - let cash_note_key = spend_request.unique_pubkey(); - let result = store_spend(self.network.clone(), spend_request.clone()).await; - - (cash_note_key, result) - }; - tasks.push(the_task); - } - - // wait for all the tasks to complete and gather the errors - let mut errors = Vec::new(); - let mut double_spent_keys = BTreeSet::new(); - for (spend_key, spend_attempt_result) in join_all(tasks).await { - match spend_attempt_result { - Err(sn_client::networking::NetworkError::GetRecordError( - GetRecordError::RecordDoesNotMatch(_), - )) - | Err(sn_client::networking::NetworkError::GetRecordError( - GetRecordError::SplitRecord { .. 
}, - )) => { - tracing::warn!( - "Double spend detected while trying to spend: {:?}", - spend_key - ); - double_spent_keys.insert(*spend_key); - } - Err(e) => { - tracing::warn!( - "Spend request errored out when sent to the network {spend_key:?}: {e}" - ); - errors.push((spend_key, e)); - } - Ok(()) => { - tracing::trace!( - "Spend request was successfully sent to the network: {spend_key:?}" - ); - } - } - } - - // report errors accordingly - // double spend errors in priority as they should be dealt with by the wallet - if !double_spent_keys.is_empty() { - return Err(SendSpendsError::DoubleSpendAttemptedForCashNotes( - double_spent_keys, - )); - } - if !errors.is_empty() { - let mut err_report = "Failed to send spend requests to the network:".to_string(); - for (spend_key, e) in &errors { - tracing::warn!("Failed to send spend request to the network: {spend_key:?}: {e}"); - err_report.push_str(&format!("{spend_key:?}: {e}")); - } - return Err(SendSpendsError::CouldNotSendMoney(err_report)); - } - - Ok(()) - } - - /// Resend failed transactions. This can optionally verify the store has been successful. - /// This will attempt to GET the cash_note from the network. - pub(super) async fn resend_pending_transactions(&mut self, wallet: &mut HotWallet) { - if wallet.unconfirmed_spend_requests().is_empty() { - return; - } - - if self - .send_spends(wallet.unconfirmed_spend_requests().iter()) - .await - .is_ok() - { - wallet.clear_confirmed_spend_requests(); - } - } - - /// Deposits all valid `CashNotes` from a transfer into a wallet. 
- pub(super) async fn receive_transfer( - &self, - transfer: Transfer, - wallet: &mut MemWallet, - ) -> Result<(), TransferError> { - let cash_note_redemptions = wallet - .unwrap_transfer(&transfer) - .map_err(TransferError::WalletError)?; - - let cash_notes = self - .network - .verify_cash_notes_redemptions(wallet.address(), &cash_note_redemptions) - .await?; - - for cash_note in cash_notes { - match self.verify_if_cash_note_is_valid(&cash_note).await { - Ok(_) => wallet.deposit_cash_note(cash_note)?, - Err(e) => { - tracing::warn!("Error verifying CashNote: {}", e); - } - } - } - - Ok(()) - } - - /// Verify if a `CashNote` is unspent. - pub(super) async fn verify_if_cash_note_is_valid( - &self, - cash_note: &CashNote, - ) -> Result<(), CashNoteError> { - let pk = cash_note.unique_pubkey(); - let addr = SpendAddress::from_unique_pubkey(&pk); - - match self.network.get_spend(addr).await { - // if we get a RecordNotFound, it means the CashNote is not spent, which is good - Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)) => Ok(()), - // if we get a spend, it means the CashNote is already spent - Ok(_) => Err(CashNoteError::AlreadySpent), - // report all other errors - Err(e) => Err(CashNoteError::FailedToGetSpend(format!("{e}"))), - } - } - - /// Returns the most recent cached Payment for a provided NetworkAddress. This function does not check if the - /// quote has expired or not. Use get_non_expired_payment_for_addr if you want to get a non expired one. - /// - /// If multiple payments have been made to the same address, then we pick the last one as it is the most recent. 
- pub fn get_recent_payment_for_addr( - &self, - xor_name: &XorName, - wallet: &mut HotWallet, - ) -> Result<(Payment, PeerId), sn_transfers::WalletError> { - let payment_detail = wallet.api().get_recent_payment(xor_name)?; - - let payment = payment_detail.to_payment(); - let peer_id = PeerId::from_bytes(&payment_detail.peer_id_bytes) - .expect("payment detail should have a valid peer id"); - - Ok((payment, peer_id)) - } - - /// Creates a `Transfer` that can be received by the receiver. - /// Once received, it will be turned into a `CashNote` that the receiver can spend. - pub async fn send( - &mut self, - to: MainPubkey, - amount_in_nano: NanoTokens, - reason: Option, - wallet: &mut MemWallet, - ) -> Result { - let signed_transaction = - wallet.create_signed_transaction(vec![(amount_in_nano, to)], reason)?; - - // return the first CashNote (assuming there is only one because we only sent to one recipient) - let cash_note_for_recipient = match &signed_transaction.output_cashnotes[..] { - [cash_note] => Ok(cash_note), - [_multiple, ..] => Err(SendError::CashNoteAmountUnexpected( - "Got multiple, expected 1.".into(), - )), - [] => Err(SendError::CashNoteAmountUnexpected( - "Got 0, expected 1.".into(), - )), - }?; - - let transfer = Transfer::transfer_from_cash_note(cash_note_for_recipient) - .map_err(SendError::TransferError)?; - - self.send_spends(signed_transaction.spends.iter()).await?; - - wallet.process_signed_transaction(signed_transaction.clone()); - - for spend in &signed_transaction.spends { - wallet.add_pending_spend(spend.clone()); - } - - Ok(transfer) - } - - /// Receive a `CashNoteRedemption` through a transfer message. 
- pub async fn receive( - &self, - transfer_hex: &str, - wallet: &mut MemWallet, - ) -> Result<(), ReceiveError> { - let transfer = Transfer::from_hex(transfer_hex) - .map_err(|_| ReceiveError::TransferDeserializationFailed)?; - self.receive_transfer(transfer, wallet).await?; - Ok(()) - } -} - -/// Send a `SpendCashNote` request to the network. -async fn store_spend(network: Network, spend: SignedSpend) -> Result<(), NetworkError> { - let unique_pubkey = *spend.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let key = network_address.to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&key); - tracing::trace!("Sending spend {unique_pubkey:?} to the network via put_record, with addr of {cash_note_addr:?} - {pretty_key:?}"); - let record_kind = RecordKind::Spend; - let record = Record { - key, - value: try_serialize_record(&[spend], record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let (record_to_verify, expected_holders) = if VERIFY_STORE { - let expected_holders: HashSet<_> = network - .get_closest_peers(&network_address, true) - .await? 
- .iter() - .cloned() - .collect(); - (Some(record.clone()), expected_holders) - } else { - (None, Default::default()) - }; - - // When there is retry on Put side, no need to have a retry on Get - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: record_to_verify, - expected_holders, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Persistent), - use_put_record_to: None, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - network.put_record(record, &put_cfg).await -} diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs new file mode 100644 index 0000000000..0714f60d9d --- /dev/null +++ b/autonomi/src/client/utils.rs @@ -0,0 +1,272 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use std::{ + collections::{BTreeMap, HashMap}, + num::NonZero, +}; + +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use rand::{thread_rng, Rng}; +use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; +use sn_evm::{EvmWallet, ProofOfPayment, QuoteHash, QuotePayment, TxHash}; +use sn_networking::{ + GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind, +}; +use sn_protocol::{ + messages::ChunkProof, + storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy}, + NetworkAddress, +}; +use xor_name::XorName; + +use crate::self_encryption::DataMapLevel; + +use super::{ + data::{GetError, PayError, PutError}, + Client, +}; + +impl Client { + /// Fetch and decrypt all chunks in the data map. + pub(crate) async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { + let mut encrypted_chunks = vec![]; + + for info in data_map.infos() { + let chunk = self + .chunk_get(info.dst_hash) + .await + .inspect_err(|err| error!("Error fetching chunk {:?}: {err:?}", info.dst_hash))?; + let chunk = EncryptedChunk { + index: info.index, + content: chunk.value, + }; + encrypted_chunks.push(chunk); + } + + let data = decrypt_full_set(data_map, &encrypted_chunks).map_err(|e| { + error!("Error decrypting encrypted_chunks: {e:?}"); + GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e)) + })?; + + Ok(data) + } + + /// Unpack a wrapped data map and fetch all bytes using self-encryption. 
+ pub(crate) async fn fetch_from_data_map_chunk( + &self, + data_map_bytes: &Bytes, + ) -> Result { + let mut data_map_level: DataMapLevel = rmp_serde::from_slice(data_map_bytes) + .map_err(GetError::InvalidDataMap) + .inspect_err(|err| error!("Error deserializing data map: {err:?}"))?; + + loop { + let data_map = match &data_map_level { + DataMapLevel::First(map) => map, + DataMapLevel::Additional(map) => map, + }; + + let data = self.fetch_from_data_map(data_map).await?; + + match &data_map_level { + DataMapLevel::First(_) => break Ok(data), + DataMapLevel::Additional(_) => { + data_map_level = rmp_serde::from_slice(&data).map_err(|err| { + error!("Error deserializing data map: {err:?}"); + GetError::InvalidDataMap(err) + })?; + continue; + } + }; + } + } + + pub(crate) async fn chunk_upload_with_payment( + &self, + chunk: Chunk, + payment: ProofOfPayment, + ) -> Result<(), PutError> { + let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID"); + + debug!("Storing chunk: {chunk:?} to {:?}", storing_node); + + let key = chunk.network_address().to_record_key(); + + let record_kind = RecordKind::ChunkWithPayment; + let record = Record { + key: key.clone(), + value: try_serialize_record(&(payment, chunk.clone()), record_kind) + .map_err(|e| { + PutError::Serialization(format!( + "Failed to serialize chunk with payment: {e:?}" + )) + })? + .to_vec(), + publisher: None, + expires: None, + }; + + let verification = { + let verification_cfg = GetRecordCfg { + get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")), + retry_strategy: Some(RetryStrategy::Quick), + target_record: None, + expected_holders: Default::default(), + is_register: false, + }; + + let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk) + .map_err(|e| PutError::Serialization(format!("Failed to serialize chunk: {e:?}")))? 
+ .to_vec(); + let random_nonce = thread_rng().gen::(); + let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); + + Some(( + VerificationKind::ChunkProof { + expected_proof, + nonce: random_nonce, + }, + verification_cfg, + )) + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::One, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: Some(vec![storing_node]), + verification, + }; + Ok(self.network.put_record(record, &put_cfg).await?) + } + + /// Pay for the chunks and get the proof of payment. + pub(crate) async fn pay( + &self, + content_addrs: impl Iterator, + wallet: &EvmWallet, + ) -> Result<(HashMap, Vec), PayError> { + let cost_map = self.get_store_quotes(content_addrs).await?; + let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map); + + // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying. + // TODO: retry when it fails? + // Execute chunk payments + let payments = wallet + .pay_for_quotes(quote_payments) + .await + .map_err(|err| PayError::from(err.0))?; + + let proofs = construct_proofs(&cost_map, &payments); + + trace!( + "Chunk payments of {} chunks completed. {} chunks were free / already paid for", + proofs.len(), + skipped_chunks.len() + ); + + Ok((proofs, skipped_chunks)) + } + + pub(crate) async fn get_store_quotes( + &self, + content_addrs: impl Iterator, + ) -> Result, PayError> { + let futures: Vec<_> = content_addrs + .into_iter() + .map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr)) + .collect(); + + let quotes = futures::future::try_join_all(futures).await?; + + Ok(quotes.into_iter().collect::>()) + } +} + +/// Fetch a store quote for a content address with a retry strategy. 
+async fn fetch_store_quote_with_retries( + network: &Network, + content_addr: XorName, +) -> Result<(XorName, PayeeQuote), PayError> { + let mut retries = 0; + + loop { + match fetch_store_quote(network, content_addr).await { + Ok(quote) => { + break Ok((content_addr, quote)); + } + Err(err) if retries < 2 => { + retries += 1; + error!("Error while fetching store quote: {err:?}, retry #{retries}"); + } + Err(err) => { + error!( + "Error while fetching store quote: {err:?}, stopping after {retries} retries" + ); + break Err(PayError::CouldNotGetStoreQuote(content_addr)); + } + } + } +} + +/// Fetch a store quote for a content address. +async fn fetch_store_quote( + network: &Network, + content_addr: XorName, +) -> Result { + network + .get_store_costs_from_network( + NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), + vec![], + ) + .await +} + +/// Form to be executed payments and already executed payments from a cost map. +fn extract_quote_payments( + cost_map: &HashMap, +) -> (Vec, Vec) { + let mut to_be_paid = vec![]; + let mut already_paid = vec![]; + + for (chunk_address, quote) in cost_map.iter() { + if quote.2.cost.is_zero() { + already_paid.push(*chunk_address); + } else { + to_be_paid.push(( + quote.2.hash(), + quote.2.rewards_address, + quote.2.cost.as_atto(), + )); + } + } + + (to_be_paid, already_paid) +} + +/// Construct payment proofs from cost map and payments map. +fn construct_proofs( + cost_map: &HashMap, + payments: &BTreeMap, +) -> HashMap { + cost_map + .iter() + .filter_map(|(xor_name, (_, _, quote))| { + payments.get(&quote.hash()).map(|tx_hash| { + ( + *xor_name, + ProofOfPayment { + quote: quote.clone(), + tx_hash: *tx_hash, + }, + ) + }) + }) + .collect() +} diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs new file mode 100644 index 0000000000..af40f61cf6 --- /dev/null +++ b/autonomi/src/client/vault.rs @@ -0,0 +1,186 @@ +// Copyright 2024 MaidSafe.net limited.
+// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::collections::HashSet; + +use crate::client::data::PutError; +use crate::client::Client; +use bls::SecretKey; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use sn_evm::EvmWallet; +use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; +use sn_protocol::storage::{ + try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress, +}; +use sn_protocol::{storage::try_deserialize_record, NetworkAddress}; +use tracing::info; + +#[derive(Debug, thiserror::Error)] +pub enum VaultError { + #[error("Could not generate Vault secret key from entropy: {0:?}")] + Bls(#[from] bls::Error), + #[error("Scratchpad found at {0:?} was not a valid record.")] + CouldNotDeserializeVaultScratchPad(ScratchpadAddress), + #[error("Protocol: {0}")] + Protocol(#[from] sn_protocol::Error), + #[error("Network: {0}")] + Network(#[from] NetworkError), +} + +impl Client { + /// Retrieves and returns a decrypted vault if one exists. + pub async fn fetch_and_decrypt_vault( + &self, + secret_key: &SecretKey, + ) -> Result, VaultError> { + info!("Fetching and decrypting vault"); + let pad = self.get_vault_from_network(secret_key).await?; + + Ok(pad.decrypt_data(secret_key)?) 
+ } + + /// Gets the vault Scratchpad from a provided client public key + async fn get_vault_from_network( + &self, + secret_key: &SecretKey, + ) -> Result { + let client_pk = secret_key.public_key(); + + let scratch_address = ScratchpadAddress::new(client_pk); + let network_address = NetworkAddress::from_scratchpad_address(scratch_address); + info!("Fetching vault from network at {network_address:?}",); + let scratch_key = network_address.to_record_key(); + + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }; + + let record = self + .network + .get_record_from_network(scratch_key, &get_cfg) + .await + .inspect_err(|err| { + debug!("Failed to fetch vault {network_address:?} from network: {err}"); + })?; + + let pad = try_deserialize_record::(&record) + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; + + Ok(pad) + } + + /// Put data into the client's VaultPacket + /// + /// Pays for a new VaultPacket if none yet created for the client. Returns the current version + /// of the data on success. 
+ pub async fn write_bytes_to_vault( + &mut self, + data: Bytes, + wallet: &mut EvmWallet, + secret_key: &SecretKey, + ) -> Result { + let client_pk = secret_key.public_key(); + + let pad_res = self.get_vault_from_network(secret_key).await; + let mut is_new = true; + + let mut scratch = if let Ok(existing_data) = pad_res { + info!("Scratchpad already exists, returning existing data"); + + info!( + "scratch already exists, is version {:?}", + existing_data.count() + ); + + is_new = false; + existing_data + } else { + trace!("new scratchpad creation"); + Scratchpad::new(client_pk) + }; + + let next_count = scratch.update_and_sign(data, secret_key); + let scratch_address = scratch.network_address(); + let scratch_key = scratch_address.to_record_key(); + + info!("Writing to vault at {scratch_address:?}",); + + let record = if is_new { + self.pay( + [&scratch_address].iter().filter_map(|f| f.as_xorname()), + wallet, + ) + .await + .inspect_err(|err| { + error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}"); + })?; + + let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; + let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; + // Should always be there, else it would have failed on the payment step. + let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + + Record { + key: scratch_key, + value: try_serialize_record(&(proof, scratch), RecordKind::ScratchpadWithPayment) + .map_err(|_| { + PutError::Serialization( + "Failed to serialize scratchpad with payment".to_string(), + ) + })? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: scratch_key, + value: try_serialize_record(&scratch, RecordKind::Scratchpad) + .map_err(|_| { + PutError::Serialization("Failed to serialize scratchpad".to_string()) + })? 
+ .to_vec(), + publisher: None, + expires: None, + } + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: None, + verification: Some(( + VerificationKind::Network, + GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }, + )), + }; + + debug!("Put record - scratchpad at {scratch_address:?} to the network"); + self.network + .put_record(record, &put_cfg) + .await + .inspect_err(|err| { + error!( + "Failed to put scratchpad {scratch_address:?} to the network with err: {err:?}" + ) + })?; + + Ok(next_count) + } +} diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs new file mode 100644 index 0000000000..b6149776fe --- /dev/null +++ b/autonomi/src/client/wasm.rs @@ -0,0 +1,105 @@ +use libp2p::Multiaddr; +use wasm_bindgen::prelude::*; + +#[wasm_bindgen] +pub struct Client(super::Client); + +#[wasm_bindgen] +pub struct ChunkAddr(xor_name::XorName); + +#[wasm_bindgen] +pub struct DataAddr(xor_name::XorName); +#[wasm_bindgen] +impl DataAddr { + #[wasm_bindgen(js_name = toString)] + pub fn to_string(&self) -> String { + crate::client::address::addr_to_str(self.0) + } +} + +#[wasm_bindgen] +pub struct AttoTokens(sn_evm::AttoTokens); +#[wasm_bindgen] +impl AttoTokens { + #[wasm_bindgen(js_name = toString)] + pub fn to_string(&self) -> String { + self.0.to_string() + } +} + +#[wasm_bindgen] +impl Client { + #[wasm_bindgen(constructor)] + pub async fn connect(peers: Vec) -> Result { + let peers = peers + .into_iter() + .map(|peer| peer.parse()) + .collect::, _>>()?; + + let client = super::Client::connect(&peers).await?; + + Ok(Client(client)) + } + + #[wasm_bindgen(js_name = chunkPut)] + pub async fn chunk_put(&self, _data: Vec, _wallet: Wallet) -> Result { + async { unimplemented!() }.await + } + + #[wasm_bindgen(js_name = chunkGet)] + pub async fn chunk_get(&self, addr: 
ChunkAddr) -> Result, JsError> { + let chunk = self.0.chunk_get(addr.0).await?; + Ok(chunk.value().to_vec()) + } + + #[wasm_bindgen(js_name = dataPut)] + pub async fn data_put(&self, data: Vec, wallet: Wallet) -> Result { + let data = crate::Bytes::from(data); + let xorname = self.0.data_put(data, &wallet.0).await?; + Ok(DataAddr(xorname)) + } + + #[wasm_bindgen(js_name = dataGet)] + pub async fn data_get(&self, addr: DataAddr) -> Result, JsError> { + let data = self.0.data_get(addr.0).await?; + Ok(data.to_vec()) + } + + #[wasm_bindgen(js_name = dataCost)] + pub async fn data_cost(&self, data: Vec) -> Result { + let data = crate::Bytes::from(data); + let cost = self.0.data_cost(data).await.map_err(JsError::from)?; + + Ok(AttoTokens(cost)) + } +} + +#[wasm_bindgen] +pub struct Wallet(evmlib::wallet::Wallet); + +/// Get a funded wallet for testing. This either uses a default private key or the `EVM_PRIVATE_KEY` +/// environment variable that was used during the build process of this library. +#[wasm_bindgen(js_name = getFundedWallet)] +pub fn funded_wallet() -> Wallet { + Wallet(test_utils::evm::get_funded_wallet()) +} + +/// Enable tracing logging in the console. +/// +/// A level could be passed like `trace` or `warn`. Or set for a specific module/crate +/// with `sn_networking=trace,autonomi=info`. 
+#[wasm_bindgen(js_name = logInit)] +pub fn log_init(directive: String) { + use tracing_subscriber::prelude::*; + + console_error_panic_hook::set_once(); + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) // Only partially supported across browsers + .without_time() // std::time is not available in browsers + .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console + tracing_subscriber::registry() + .with(fmt_layer) + .with(tracing_subscriber::EnvFilter::new(directive)) + .init(); +} diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index d3e835666b..abfbd7563a 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + //! Connect to and build on the Autonomi network. //! //! # Data types @@ -14,23 +22,6 @@ //! and the history is kept. Multiple values can exist side by side in case of //! concurrency, but should converge to a single value eventually. //! -//! # Example -//! -//! ```no_run -//! # use autonomi::{Client, Bytes}; -//! # #[tokio::main] -//! # async fn main() -> Result<(), Box> { -//! let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?]; -//! let client = Client::connect(&peers).await?; -//! -//! # let mut wallet = todo!(); -//! let addr = client.put(Bytes::from("Hello, World"), &mut wallet).await?; -//! let data = client.get(addr).await?; -//! assert_eq!(data, Bytes::from("Hello, World")); -//! # Ok(()) -//! # } -//! ``` -//! //! # Features //! //! 
- `local`: Discover local peers using mDNS. Useful for development. @@ -38,16 +29,21 @@ // docs.rs generation will enable unstable `doc_cfg` feature #![cfg_attr(docsrs, feature(doc_cfg))] +#[macro_use] +extern crate tracing; + +pub mod client; +#[cfg(feature = "data")] +mod self_encryption; + +pub use sn_evm::get_evm_network_from_env; +pub use sn_evm::EvmNetwork; +pub use sn_evm::EvmWallet as Wallet; +pub use sn_evm::RewardsAddress; + #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use bytes::Bytes; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use libp2p::Multiaddr; -pub use client::{Client, ConnectError, CONNECT_TIMEOUT_SECS}; - -mod client; -mod secrets; -mod self_encryption; -mod wallet; - -const VERIFY_STORE: bool = true; +pub use client::Client; diff --git a/autonomi/src/secrets.rs b/autonomi/src/secrets.rs deleted file mode 100644 index bdbe9ea800..0000000000 --- a/autonomi/src/secrets.rs +++ /dev/null @@ -1,42 +0,0 @@ -use sn_client::acc_packet::user_secret::account_wallet_secret_key; -use sn_client::transfers::MainSecretKey; - -#[derive(Debug, thiserror::Error)] -pub enum SecretsError { - /// Should never happen - #[error("Unexpected error")] - Unexpected, - /// Failed to parse entropy - #[error("Error parsing entropy for mnemonic phrase")] - FailedToParseEntropy, - /// Invalid mnemonic seed phrase - #[error("Invalid mnemonic seed phrase")] - InvalidMnemonicSeedPhrase, - /// Invalid key bytes - #[error("Invalid key bytes")] - InvalidKeyBytes, -} - -impl From for SecretsError { - fn from(value: sn_client::Error) -> Self { - match value { - sn_client::Error::FailedToParseEntropy => SecretsError::FailedToParseEntropy, - sn_client::Error::InvalidMnemonicSeedPhrase => SecretsError::InvalidMnemonicSeedPhrase, - sn_client::Error::InvalidKeyBytes => SecretsError::InvalidKeyBytes, - _ => SecretsError::Unexpected, - } - } -} - -#[allow(dead_code)] -fn generate_mnemonic() -> Result { - 
sn_client::acc_packet::user_secret::random_eip2333_mnemonic().map_err(SecretsError::from) -} - -#[allow(dead_code)] -fn main_sk_from_mnemonic( - mnemonic: bip39::Mnemonic, - derivation_passphrase: &str, -) -> Result { - account_wallet_secret_key(mnemonic, derivation_passphrase).map_err(SecretsError::from) -} diff --git a/autonomi/src/self_encryption.rs b/autonomi/src/self_encryption.rs index 34c3357787..097dcb69ce 100644 --- a/autonomi/src/self_encryption.rs +++ b/autonomi/src/self_encryption.rs @@ -68,7 +68,8 @@ fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec), Error> { chunk.serialize(&mut serialiser)?; let serialized_chunk = bytes.into_inner().freeze(); - let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)?; + let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk) + .inspect_err(|err| error!("Failed to encrypt chunks: {err:?}"))?; chunks = next_encrypted_chunks .iter() .map(|c| Chunk::new(c.content.clone())) // no need to encrypt what is self-encrypted @@ -85,6 +86,8 @@ fn wrap_data_map(data_map: &DataMapLevel) -> Result, -} - -impl MemWallet { - /// Create an empty wallet from a main secret key. - #[allow(dead_code)] - fn from_main_secret_key(main_secret_key: MainSecretKey) -> Self { - Self { - hot_wallet: HotWallet::new(main_secret_key, PathBuf::default()), - available_cash_notes: Default::default(), - } - } - - // TODO: as WASM can not save a wallet state to disk or load from disk -- we need to provide a wallet state manually. - /// Initialise a wallet from wallet state bytes containing all payments, (un)confirmed spends, cash notes and the secret key. - #[allow(dead_code)] - fn from_state_bytes>(_data: T) -> Self { - todo!() - } - - /// Returns the entire wallet state as bytes. That includes all payments (un)confirmed spends, cash notes and the secret key. - /// A wallet can be fully initialised again from these state bytes. 
- #[allow(dead_code)] - fn to_state_bytes(&self) -> Vec { - todo!() - } - - /// Returns the wallet address (main public key). - pub fn address(&self) -> MainPubkey { - self.hot_wallet.address() - } - - /// Returns the balance of a wallet in Nanos. - pub fn balance(&self) -> NanoTokens { - self.hot_wallet.balance() - } - - pub(super) fn unwrap_transfer( - &self, - transfer: &Transfer, - ) -> Result, WalletError> { - self.hot_wallet - .unwrap_transfer(transfer) - .map_err(|_| WalletError::FailedToDecryptTransfer) - } - - /// Returns all available `CashNotes` together with their secret key to spend them. - pub(super) fn cash_notes_with_secret_keys(&mut self) -> Vec { - self.available_cash_notes.values().cloned().collect() - } - - pub(super) fn create_signed_transaction( - &mut self, - outputs: Vec<(NanoTokens, MainPubkey)>, - reason: Option, - ) -> Result { - for output in &outputs { - if output.0.is_zero() { - return Err(WalletError::TransferAmountZero); - } - } - - let mut rng = &mut rand::rngs::OsRng; - - // create a unique key for each output - let to_unique_keys: Vec<_> = outputs - .into_iter() - .map(|(amount, address)| (amount, address, DerivationIndex::random(&mut rng), false)) - .collect(); - - let cash_notes_with_keys = self.cash_notes_with_secret_keys(); - let reason = reason.unwrap_or_default(); - - let unsigned_transaction = - UnsignedTransaction::new(cash_notes_with_keys, to_unique_keys, self.address(), reason)?; - let signed_transaction = unsigned_transaction.sign(self.hot_wallet.key())?; - - Ok(signed_transaction) - } - - fn mark_cash_notes_as_spent<'a, T: IntoIterator>( - &mut self, - unique_pubkeys: T, - ) { - let unique_pubkeys: Vec<&'a UniquePubkey> = unique_pubkeys.into_iter().collect(); - - for unique_pubkey in &unique_pubkeys { - let _ = self.available_cash_notes.remove(unique_pubkey); - } - - self.hot_wallet - .wo_wallet_mut() - .mark_notes_as_spent(unique_pubkeys); - } - - pub(super) fn deposit_cash_note(&mut self, cash_note: CashNote) -> 
Result<(), WalletError> { - if cash_note - .derived_pubkey(&self.hot_wallet.key().main_pubkey()) - .is_err() - { - return Err(WalletError::CashNoteNotOwned); - } - - self.available_cash_notes - .insert(cash_note.unique_pubkey(), cash_note.clone()); - - // DevNote: the deposit fn already does the checks above, - // but I have added them here just in case we get rid - // of the composited hotwallet and its deposit checks - self.hot_wallet - .wo_wallet_mut() - .deposit(&[cash_note]) - .map_err(|_| WalletError::CashNoteOutputNotFound)?; - - Ok(()) - } - - pub(super) fn add_pending_spend(&mut self, spend: SignedSpend) { - self.hot_wallet - .unconfirmed_spend_requests_mut() - .insert(spend); - } - - // TODO: should we verify if the transfer is valid and destined for this wallet? - pub(super) fn process_signed_transaction(&mut self, transfer: SignedTransaction) { - let spent_unique_pubkeys: HashSet<_> = transfer - .spends - .iter() - .map(|spend| spend.unique_pubkey()) - .collect(); - - self.mark_cash_notes_as_spent(spent_unique_pubkeys); - - if let Some(cash_note) = transfer.change_cashnote { - let _ = self.deposit_cash_note(cash_note); - } - } -} diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs deleted file mode 100644 index 010543b566..0000000000 --- a/autonomi/tests/common/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![allow(dead_code)] - -use bytes::Bytes; -use rand::Rng; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_transfers::{get_faucet_data_dir, HotWallet}; - -/// When launching a testnet locally, we can use the faucet wallet. 
-pub fn load_hot_wallet_from_faucet() -> HotWallet { - let root_dir = get_faucet_data_dir(); - load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .expect("faucet wallet should be available for tests") -} - -pub fn gen_random_data(len: usize) -> Bytes { - let mut data = vec![0u8; len]; - rand::thread_rng().fill(&mut data[..]); - Bytes::from(data) -} - -/// Enable logging for tests. E.g. use `RUST_LOG=autonomi` to see logs. -pub fn enable_logging() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); -} diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs new file mode 100644 index 0000000000..7b16217b97 --- /dev/null +++ b/autonomi/tests/evm/file.rs @@ -0,0 +1,86 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +#[cfg(feature = "evm-payments")] +mod test { + use autonomi::Client; + use bytes::Bytes; + use eyre::bail; + use std::time::Duration; + use test_utils::evm::get_funded_wallet; + use tokio::time::sleep; + + #[tokio::test] + async fn file() -> Result<(), Box> { + let _log_appender_guard = + sn_logging::LogBuilder::init_single_threaded_tokio_test("file", false); + + let mut client = Client::connect(&[]).await.unwrap(); + let mut wallet = get_funded_wallet(); + + // let data = common::gen_random_data(1024 * 1024 * 1000); + // let user_key = common::gen_random_data(32); + + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + + sleep(Duration::from_secs(10)).await; + + let root_fetched = client.fetch_root(addr).await?; + + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); + + Ok(()) + } + + #[cfg(feature = "vault")] + #[tokio::test] + async fn file_into_vault() -> eyre::Result<()> { + common::enable_logging(); + + let mut client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + let mut wallet = get_funded_wallet(); + + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + sleep(Duration::from_secs(2)).await; + + let root_fetched = client.fetch_root(addr).await?; + + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); + + // now assert over the stored account packet + let new_client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + if let Some(ap) = new_client.fetch_and_decrypt_vault().await? 
{ + let ap_root_fetched = Client::deserialise_root(ap)?; + + assert_eq!( + root.map, ap_root_fetched.map, + "root fetched should match root put" + ); + } else { + bail!("No account packet found"); + } + + Ok(()) + } +} diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs deleted file mode 100644 index 0e221ae6c7..0000000000 --- a/autonomi/tests/file.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::time::Duration; - -use autonomi::Client; -use tokio::time::sleep; - -mod common; - -#[cfg(feature = "files")] -#[tokio::test] -async fn file() -> Result<(), Box> { - common::enable_logging(); - - let mut client = Client::connect(&[]).await?; - let mut wallet = common::load_hot_wallet_from_faucet(); - - // let data = common::gen_random_data(1024 * 1024 * 1000); - // let user_key = common::gen_random_data(32); - - let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) - .await?; - sleep(Duration::from_secs(10)).await; - - let root_fetched = client.fetch_root(addr).await?; - - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); - - Ok(()) -} diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs new file mode 100644 index 0000000000..4c286725aa --- /dev/null +++ b/autonomi/tests/fs.rs @@ -0,0 +1,114 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +#![cfg(feature = "fs")] + +use autonomi::Client; +use eyre::Result; +use sha2::{Digest, Sha256}; +use sn_logging::LogBuilder; +use std::fs::File; +use std::io::{BufReader, Read}; +use std::path::PathBuf; +use std::time::Duration; +use test_utils::{evm::get_funded_wallet, peers_from_env}; +use tokio::time::sleep; +use walkdir::WalkDir; + +// With a local evm network, and local network, run: +// EVM_NETWORK=local cargo test --features="fs,local" --package autonomi --test file +#[tokio::test] +async fn dir_upload_download() -> Result<()> { + let _log_appender_guard = + LogBuilder::init_single_threaded_tokio_test("dir_upload_download", false); + + let client = Client::connect(&peers_from_env()?).await?; + let wallet = get_funded_wallet(); + + let addr = client + .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet) + .await?; + + sleep(Duration::from_secs(10)).await; + + client + .dir_download(addr, &PathBuf::from("tests/file/test_dir_fetched")) + .await?; + + // compare the two directories + assert_eq!( + compute_dir_sha256("tests/file/test_dir")?, + compute_dir_sha256("tests/file/test_dir_fetched")?, + ); + Ok(()) +} + +fn compute_sha256(path: &str) -> Result { + let mut hasher = Sha256::new(); + let mut file = BufReader::new(File::open(path)?); + let mut buffer = [0; 1024]; + while let Ok(read_bytes) = file.read(&mut buffer) { + if read_bytes == 0 { + break; + } + hasher.update(&buffer[..read_bytes]); + } + Ok(format!("{:x}", hasher.finalize())) +} + +fn compute_dir_sha256(dir: &str) -> Result { + let mut hasher = Sha256::new(); + for entry in WalkDir::new(dir).into_iter().filter_map(|e| e.ok()) { + if entry.file_type().is_file() { + let sha = compute_sha256( + entry + .path() + .to_str() + .expect("Failed to convert path to string"), + )?; + hasher.update(sha.as_bytes()); + } + } + Ok(format!("{:x}", hasher.finalize())) +} + +#[cfg(feature = "vault")] +#[tokio::test] +async fn file_into_vault() -> Result<()> { + let _log_appender_guard = 
LogBuilder::init_single_threaded_tokio_test("file", false); + + let mut client = Client::connect(&peers_from_env()?).await?; + let mut wallet = get_funded_wallet(); + let client_sk = bls::SecretKey::random(); + + let addr = client + .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet) + .await?; + sleep(Duration::from_secs(2)).await; + + let archive = client.archive_get(addr).await?; + client + .write_bytes_to_vault(archive.into_bytes()?, &mut wallet, &client_sk) + .await?; + + // now assert over the stored account packet + let new_client = Client::connect(&[]).await?; + + if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? { + let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(&ap)?; + + assert_eq!( + archive.map, ap_archive_fetched.map, + "archive fetched should match archive put" + ); + } else { + eyre::bail!("No account packet found"); + } + + Ok(()) +} diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index 5c2111f36b..dbced37d00 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -1,25 +1,34 @@ -use std::time::Duration; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +#![cfg(feature = "data")] use autonomi::Client; +use eyre::Result; +use sn_logging::LogBuilder; +use std::time::Duration; +use test_utils::{evm::get_funded_wallet, gen_random_data, peers_from_env}; use tokio::time::sleep; -mod common; - -#[cfg(feature = "data")] #[tokio::test] -async fn put() { - common::enable_logging(); +async fn put() -> Result<()> { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("put", false); - let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = common::load_hot_wallet_from_faucet(); - let data = common::gen_random_data(1024 * 1024 * 10); + let client = Client::connect(&peers_from_env()?).await?; + let wallet = get_funded_wallet(); + let data = gen_random_data(1024 * 1024 * 10); - // let quote = client.quote(data.clone()).await.unwrap(); - // let payment = client.pay(quote, &mut wallet).await.unwrap(); - let addr = client.put(data.clone(), &mut wallet).await.unwrap(); + let addr = client.data_put(data.clone(), &wallet).await?; - sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(10)).await; - let data_fetched = client.get(addr).await.unwrap(); + let data_fetched = client.data_get(addr).await?; assert_eq!(data, data_fetched, "data fetched should match data put"); + + Ok(()) } diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 79cd00368d..bf88f831d8 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -1,48 +1,59 @@ -use std::time::Duration; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +#![cfg(feature = "registers")] use autonomi::Client; use bytes::Bytes; +use eyre::Result; +use rand::Rng; +use sn_logging::LogBuilder; +use std::time::Duration; +use test_utils::{evm::get_funded_wallet, peers_from_env}; use tokio::time::sleep; -use xor_name::XorName; -mod common; - -#[cfg(feature = "registers")] #[tokio::test] -async fn register() { - common::enable_logging(); +async fn register() -> Result<()> { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("register", false); - let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = common::load_hot_wallet_from_faucet(); + let client = Client::connect(&peers_from_env()?).await?; + let wallet = get_funded_wallet(); // Owner key of the register. let key = bls::SecretKey::random(); // Create a register with the value [1, 2, 3, 4] + let rand_name: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(10) + .map(char::from) + .collect(); let register = client - .create_register( - vec![1, 2, 3, 4].into(), - XorName::random(&mut rand::thread_rng()), - key.clone(), - &mut wallet, - ) + .register_create(vec![1, 2, 3, 4].into(), &rand_name, key.clone(), &wallet) .await .unwrap(); - sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(10)).await; // Fetch the register again - let register = client.fetch_register(*register.address()).await.unwrap(); + let register = client.register_get(*register.address()).await.unwrap(); // Update the register with the value [5, 6, 7, 8] client - .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) + .register_update(register.clone(), vec![5, 6, 7, 8].into(), key) .await .unwrap(); sleep(Duration::from_secs(2)).await; // Fetch and verify the register contains the updated value - let register = 
client.fetch_register(*register.address()).await.unwrap(); + let register = client.register_get(*register.address()).await.unwrap(); assert_eq!(register.values(), vec![Bytes::from(vec![5, 6, 7, 8])]); + + Ok(()) } diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs new file mode 100644 index 0000000000..5d5be9301e --- /dev/null +++ b/autonomi/tests/wallet.rs @@ -0,0 +1,50 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use const_hex::traits::FromHex; +use sn_evm::get_evm_network_from_env; +use sn_evm::EvmWallet; +use sn_evm::{Amount, RewardsAddress}; +use sn_logging::LogBuilder; +use test_utils::evm::get_funded_wallet; + +#[tokio::test] +async fn from_private_key() { + let private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa"; + let network = + get_evm_network_from_env().expect("Could not get EVM network from environment variables"); + let wallet = EvmWallet::new_from_private_key(network, private_key).unwrap(); + + assert_eq!( + wallet.address(), + RewardsAddress::from_hex("0x69D5BF2Bc42bca8782b8D2b4FdfF2b1Fa7644Fe7").unwrap() + ) +} + +#[tokio::test] +async fn send_tokens() { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("wallet", false); + + let network = + get_evm_network_from_env().expect("Could not get EVM network from environment variables"); + let wallet = get_funded_wallet(); + + let receiving_wallet = EvmWallet::new_with_random_wallet(network); + + let initial_balance = 
receiving_wallet.balance_of_tokens().await.unwrap(); + + assert_eq!(initial_balance, Amount::from(0)); + + let _ = wallet + .transfer_tokens(receiving_wallet.address(), Amount::from(10)) + .await; + + let final_balance = receiving_wallet.balance_of_tokens().await.unwrap(); + + assert_eq!(final_balance, Amount::from(10)); +} diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs new file mode 100644 index 0000000000..485193ea48 --- /dev/null +++ b/autonomi/tests/wasm.rs @@ -0,0 +1,51 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +#![cfg(target_arch = "wasm32")] + +use std::time::Duration; + +use autonomi::Client; +use sn_networking::target_arch::sleep; +use test_utils::{evm::get_funded_wallet, gen_random_data, peers_from_env}; +use wasm_bindgen_test::*; + +wasm_bindgen_test_configure!(run_in_browser); + +#[wasm_bindgen_test] +async fn put() -> Result<(), Box> { + enable_logging_wasm("sn_networking,autonomi,wasm"); + + let client = Client::connect(&peers_from_env()?).await.unwrap(); + let wallet = get_funded_wallet(); + + let data = gen_random_data(1024 * 1024 * 2); // 2MiB + let addr = client.put(data.clone(), &wallet).await.unwrap(); + + sleep(Duration::from_secs(2)).await; + + let data_fetched = client.get(addr).await.unwrap(); + assert_eq!(data, data_fetched, "data fetched should match data put"); + + Ok(()) +} + +fn enable_logging_wasm(directive: impl AsRef) { + use tracing_subscriber::prelude::*; + + console_error_panic_hook::set_once(); + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) // Only partially supported across browsers + .without_time() // std::time is not available in browsers + .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console + tracing_subscriber::registry() + .with(fmt_layer) + .with(tracing_subscriber::EnvFilter::new(directive)) + .init(); +} diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml new file mode 100644 index 0000000000..6712604130 --- /dev/null +++ b/evm_testnet/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors = ["MaidSafe Developers "] +description = "Safe Network EVM" +edition = "2021" +homepage = "https://maidsafe.net" +license = "GPL-3.0" +name = "evm_testnet" +repository = "https://github.com/maidsafe/safe_network" +version = "0.1.1" + +[dependencies] +clap = { version = "4.5", features = ["derive"] } +dirs-next = "~2.0.0" +evmlib = { path = "../evmlib", version = "0.1.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +tokio = { version = "1.40", features = ["rt-multi-thread", 
"signal"] } + +[lints] +workspace = true diff --git a/evm_testnet/README.md b/evm_testnet/README.md new file mode 100644 index 0000000000..c6b2b20820 --- /dev/null +++ b/evm_testnet/README.md @@ -0,0 +1,26 @@ +## EVM Testnet + +Tool to run a local Ethereum node that automatically deploys all Autonomi smart contracts. + +### Requirements + +1. Install Foundry to get access to Anvil nodes: https://book.getfoundry.sh/getting-started/installation + +### Usage + +```bash +cargo run --bin evm_testnet -- --genesis-wallet +``` + +Example output: + +``` +************************* +* Ethereum node started * +************************* +RPC URL: http://localhost:60093/ +Payment token address: 0x5FbDB2315678afecb367f032d93F642f64180aa3 +Chunk payments address: 0x8464135c8F25Da09e49BC8782676a84730C318bC +Deployer wallet private key: 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +Genesis wallet balance: (tokens: 20000000000000000000000000, gas: 9998998011366954730202) +``` diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs new file mode 100644 index 0000000000..9e7f5a9dfd --- /dev/null +++ b/evm_testnet/src/main.rs @@ -0,0 +1,180 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use clap::Parser; +use evmlib::common::{Address, Amount}; +use evmlib::testnet::Testnet; +use evmlib::wallet::{balance_of_gas_tokens, balance_of_tokens, Wallet}; +use std::str::FromStr; + +/// A tool to start a local Ethereum node. 
+#[derive(Debug, Parser)] +#[clap(version, author, verbatim_doc_comment)] +struct Args { + /// Wallet that will hold ~all gas funds and payment tokens. + #[clap(long, short)] + genesis_wallet: Option<Address>
, +} + +#[tokio::main] +async fn main() { + let args = Args::parse(); + start_node(args.genesis_wallet).await; +} + +async fn start_node(genesis_wallet: Option<Address>
) { + let testnet = Testnet::new().await; + + println!("*************************"); + println!("* Ethereum node started *"); + println!("*************************"); + + // Transfer all gas and payment tokens to the genesis wallet. + if let Some(genesis) = genesis_wallet { + transfer_funds(&testnet, genesis).await; + } + + let testnet_data = TestnetData::new(&testnet, genesis_wallet).await; + testnet_data.save_csv(); + testnet_data.print(); + keep_alive(testnet).await; + + println!("Ethereum node stopped."); + TestnetData::remove_csv(); +} + +async fn transfer_funds(testnet: &Testnet, genesis_wallet: Address) { + let wallet = + Wallet::new_from_private_key(testnet.to_network(), &testnet.default_wallet_private_key()) + .expect("Could not init deployer wallet"); + + let token_amount = wallet + .balance_of_tokens() + .await + .expect("Could not get balance of tokens"); + + // Transfer all payment tokens. + let _ = wallet.transfer_tokens(genesis_wallet, token_amount).await; + + let gas_amount = wallet + .balance_of_gas_tokens() + .await + .expect("Could not get balance of gas tokens"); + + let sub_amount = Amount::from_str("1000000000000000000").expect("Could not parse sub amount"); + + // Transfer almost all gas. Save some gas for this tx. + let _ = wallet + .transfer_gas_tokens(genesis_wallet, gas_amount - sub_amount) + .await; +} + +async fn keep_alive(variable: T) { + let _ = tokio::signal::ctrl_c().await; + println!("Received Ctrl-C, stopping..."); + drop(variable); +} + +#[derive(Debug)] +struct TestnetData { + rpc_url: String, + payment_token_address: String, + data_payments_address: String, + deployer_wallet_private_key: String, + tokens_and_gas: Option<(Amount, Amount)>, +} + +impl TestnetData { + async fn new(testnet: &Testnet, genesis_wallet: Option
) -> Self { + let network = testnet.to_network(); + + let tokens_and_gas = if let Some(genesis) = genesis_wallet { + let tokens = balance_of_tokens(genesis, &network) + .await + .unwrap_or(Amount::MIN); + + let gas = balance_of_gas_tokens(genesis, &network) + .await + .unwrap_or(Amount::MIN); + Some((tokens, gas)) + } else { + None + }; + Self { + rpc_url: network.rpc_url().to_string(), + payment_token_address: network.payment_token_address().to_string(), + data_payments_address: network.data_payments_address().to_string(), + deployer_wallet_private_key: testnet.default_wallet_private_key(), + tokens_and_gas, + } + } + + fn print(&self) { + println!("RPC URL: {}", self.rpc_url); + println!("Payment token address: {}", self.payment_token_address); + println!("Data payments address: {}", self.data_payments_address); + println!( + "Deployer wallet private key: {}", + self.deployer_wallet_private_key + ); + if let Some((tokens, gas)) = self.tokens_and_gas { + println!("Genesis wallet balance (atto): (tokens: {tokens}, gas: {gas})"); + } + + println!(); + println!("--------------"); + println!("Run the CLI or Node with the following env vars set to manually connect to this network:"); + println!( + "{}=\"{}\" {}=\"{}\" {}=\"{}\"", + sn_evm::RPC_URL, + self.rpc_url, + sn_evm::PAYMENT_TOKEN_ADDRESS, + self.payment_token_address, + sn_evm::DATA_PAYMENTS_ADDRESS, + self.data_payments_address + ); + println!("--------------"); + println!("For CLI operations that required a payment: use the deployer secret key by providing this env var:"); + println!("SECRET_KEY=\"{}\"", self.deployer_wallet_private_key); + println!("--------------"); + println!(); + } + + fn save_csv(&self) { + let csv_path = evmlib::utils::get_evm_testnet_csv_path() + .expect("Could not get data_dir to save evm testnet data"); + let path = csv_path + .parent() + .expect("Could not get parent dir of csv_path"); + if !path.exists() { + std::fs::create_dir_all(path).expect("Could not create safe directory"); + 
} + + let csv = format!( + "{},{},{},{}", + self.rpc_url, + self.payment_token_address, + self.data_payments_address, + self.deployer_wallet_private_key + ); + std::fs::write(&csv_path, csv).expect("Could not write to evm_testnet_data.csv file"); + println!("EVM testnet data saved to: {csv_path:?}"); + println!("When running the Node or CLI with --feature=local, it will automatically use this network by loading the EVM Network's info from the CSV file."); + println!(); + } + + fn remove_csv() { + let csv_path = evmlib::utils::get_evm_testnet_csv_path() + .expect("Could not get data_dir to remove evm testnet data"); + if csv_path.exists() { + std::fs::remove_file(&csv_path).expect("Could not remove evm_testnet_data.csv file"); + } else { + eprintln!("No EVM testnet data CSV file found to remove"); + } + } +} diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml new file mode 100644 index 0000000000..fa5b2d9769 --- /dev/null +++ b/evmlib/Cargo.toml @@ -0,0 +1,29 @@ +[package] +authors = ["MaidSafe Developers "] +description = "Safe Network EVM" +edition = "2021" +homepage = "https://maidsafe.net" +license = "GPL-3.0" +name = "evmlib" +repository = "https://github.com/maidsafe/safe_network" +version = "0.1.1" + +[features] +wasm-bindgen = ["alloy/wasm-bindgen"] +local = [] + +[dependencies] +alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +dirs-next = "~2.0.0" +serde = "1.0" +serde_with = { version = "3.11.0", features = ["macros"] } +thiserror = "1.0" +tracing = { version = "~0.1.26" } +tokio = "1.38.0" +rand = "0.8.5" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { version = "0.2.12", features = ["js"] } + +[lints] +workspace = true diff --git a/evmlib/README.md b/evmlib/README.md new file mode 100644 index 0000000000..b0ee569fdd --- /dev/null +++ b/evmlib/README.md @@ -0,0 +1,3 @@ +## Testing + +1. 
Install Foundry to get access to Anvil nodes: https://book.getfoundry.sh/getting-started/installation diff --git a/evmlib/artifacts/AutonomiNetworkToken.json b/evmlib/artifacts/AutonomiNetworkToken.json new file mode 100644 index 0000000000..b075133e1c --- /dev/null +++ b/evmlib/artifacts/AutonomiNetworkToken.json @@ -0,0 +1,897 @@ +{ + "_format": "hh-sol-artifact-1", + "contractName": "AutonomiNetworkToken", + "sourceName": "contracts/AutonomiNetworkToken.sol", + "abi": [ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "CheckpointUnorderedInsertion", + "type": "error" + }, + { + "inputs": [], + "name": "ECDSAInvalidSignature", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "length", + "type": "uint256" + } + ], + "name": "ECDSAInvalidSignatureLength", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "ECDSAInvalidSignatureS", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "increasedSupply", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "cap", + "type": "uint256" + } + ], + "name": "ERC20ExceededSafeSupply", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "allowance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientAllowance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "needed", + "type": "uint256" + } + ], + "name": "ERC20InsufficientBalance", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": 
"approver", + "type": "address" + } + ], + "name": "ERC20InvalidApprover", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + } + ], + "name": "ERC20InvalidReceiver", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "ERC20InvalidSender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "ERC20InvalidSpender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "name": "ERC2612ExpiredSignature", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "signer", + "type": "address" + }, + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "ERC2612InvalidSigner", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + }, + { + "internalType": "uint48", + "name": "clock", + "type": "uint48" + } + ], + "name": "ERC5805FutureLookup", + "type": "error" + }, + { + "inputs": [], + "name": "ERC6372InconsistentClock", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "currentNonce", + "type": "uint256" + } + ], + "name": "InvalidAccountNonce", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidShortString", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "bits", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "SafeCastOverflowedUintDowncast", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "string", + "name": "str", + "type": "string" + } + ], + "name": "StringTooLong", + "type": "error" + }, + { + "inputs": [ + 
{ + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + } + ], + "name": "VotesExpiredSignature", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "delegator", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "fromDelegate", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "toDelegate", + "type": "address" + } + ], + "name": "DelegateChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "delegate", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "previousVotes", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newVotes", + "type": "uint256" + } + ], + "name": "DelegateVotesChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "EIP712DomainChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [], + "name": "CLOCK_MODE", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": 
[], + "name": "DOMAIN_SEPARATOR", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "burnFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint32", + "name": "pos", + "type": "uint32" + } + ], + "name": "checkpoints", + "outputs": [ + { + "components": [ + { + "internalType": "uint48", + "name": "_key", + "type": "uint48" + }, + { + "internalType": "uint208", + "name": "_value", + "type": "uint208" + 
} + ], + "internalType": "struct Checkpoints.Checkpoint208", + "name": "", + "type": "tuple" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clock", + "outputs": [ + { + "internalType": "uint48", + "name": "", + "type": "uint48" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatee", + "type": "address" + } + ], + "name": "delegate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatee", + "type": "address" + }, + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "delegateBySig", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "delegates", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip712Domain", + "outputs": [ + { + "internalType": "bytes1", + "name": "fields", + "type": "bytes1" + }, + { + "internalType": "string", + "name": "name", + "type": "string" + }, + { + "internalType": "string", + "name": "version", + "type": "string" + }, + { + "internalType": "uint256", + "name": "chainId", + "type": "uint256" + }, + { + "internalType": "address", + "name": 
"verifyingContract", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + }, + { + "internalType": "uint256[]", + "name": "extensions", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + } + ], + "name": "getPastTotalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + } + ], + "name": "getPastVotes", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "getVotes", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "nonces", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "numCheckpoints", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + 
"internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "permit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": 
"0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b5565b506004620000d68282620009b5565b50620000e891508390506005620001c0565b61012052620000f9816006620001c0565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001ba33620001a46012600a62000b94565b620001b4906301312d0062000ba5565b620001f9565b62000cae565b6000602083511015620001e057620001d8836200023b565b9050620001f3565b81620001ed8482620009b5565b5060ff90505b92915050565b6001600160a01b038216620002295760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000237600083836200027e565b5050565b600080829050601f8151111562000269578260405163305a27a960e01b815260040162000220919062000bbf565b8051620002768262000c10565b179392505050565b6200028b83838362000290565b505050565b6200029d838383620002ff565b6001600160a01b038316620002f2576000620002b860025490565b90506001600160d01b0380821115620002ef57604051630e58ae9360e11b8152600481018390526024810182905260440162000220565b50505b6200028b83838362000432565b6001600160a01b0383166200032e57806002600082825462000322919062000c35565b90915550620003a29050565b6001600160a01b03831660009081526020819052604090205481811015620003835760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640162000220565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003c057600280548290039055620003df565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a
01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042591815260200190565b60405180910390a3505050565b6001600160a01b038316620004675762000464600a62000953620004ca60201b176200045e84620004df565b62000519565b50505b6001600160a01b038216620004965762000493600a6200095f6200055660201b176200045e84620004df565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028b9291821691168362000564565b6000620004d8828462000c4b565b9392505050565b60006001600160d01b0382111562000515576040516306dfcc6560e41b815260d060048201526024810183905260440162000220565b5090565b600080620005496200052a620006cb565b620005406200053988620006dc565b868860201c565b8791906200072b565b915091505b935093915050565b6000620004d8828462000c75565b816001600160a01b0316836001600160a01b031614158015620005875750600081115b156200028b576001600160a01b038316156200062a576001600160a01b038316600090815260096020908152604082208291620005d5919062000556901b6200095f176200045e86620004df565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bda83398151915283836040516200061f929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028b576001600160a01b038216600090815260096020908152604082208291620006729190620004ca901b62000953176200045e86620004df565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bda8339815191528383604051620006bc929190918252602082015260400190565b60405180910390a25050505050565b6000620006d76200073b565b905090565b8054600090801562000722576200070883620006fa60018462000c98565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d8565b60009392505050565b6000806200054985858562000748565b6000620006d743620008da565b8254600090819080156200087b5760006200076a87620006fa60018562000c98565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c257604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff80881
6911603620008165784620007e988620006fa60018662000c98565b80546001600160d01b039290921666010000000000000265ffffffffffff9092169190911790556200086a565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054e9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054e565b600065ffffffffffff82111562000515576040516306dfcc6560e41b8152603060048201526024810183905260440162000220565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093a57607f821691505b6020821081036200095b57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028b576000816000526020600020601f850160051c810160208610156200098c5750805b601f850160051c820191505b81811015620009ad5782815560010162000998565b505050505050565b81516001600160401b03811115620009d157620009d16200090f565b620009e981620009e2845462000925565b8462000961565b602080601f83116001811462000a21576000841562000a085750858301515b600019600386901b1c1916600185901b178555620009ad565b600085815260208120601f198616915b8281101562000a525788860151825594840194600190910190840162000a31565b508582101562000a715787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad857816000190482111562000abc5762000abc62000a81565b8085161562000aca57918102915b93841c939080029062000a9c565b509250929050565b60008262000af157506001620001f3565b8162000b0057506000620001f3565b816001811462000b19576002811462000b245762000b44565b6001915050620001f3565b60ff84111562000b385762000b3862000a81565b50506001821b620001f3565b5060208310610133831016604e8410600b841016171562000b69575081810a620001f3565b62000b75838362000a97565b806000190482111562000b8c5762000b8c62000a81565b029392505050565b6000620004d860ff84168362000ae0565b8082028115828204841417620001f3576
20001f362000a81565b60006020808352835180602085015260005b8181101562000bef5785810183015185820160400152820162000bd1565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095b5760001960209190910360031b1b16919050565b80820180821115620001f357620001f362000a81565b6001600160d01b0381811683821601908082111562000c6e5762000c6e62000a81565b5092915050565b6001600160d01b0382811682821603908082111562000c6e5762000c6e62000a81565b81810381811115620001f357620001f362000a81565b60805160a05160c05160e051610100516101205161014051611ed162000d096000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163fff
fffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816
106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b0
37f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505
050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b0316915060016
00160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c156
1162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d8
1529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b50929
15050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db36600
4611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b610
6d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f0000000000000000000000000000000000000000000
00000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a
01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724838360405161128192919091825
2602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b257600101929150505
65b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff808
51682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b
2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033", + "linkReferences": {}, + "deployedLinkReferences": {} +} diff --git a/evmlib/artifacts/DataPayments.json b/evmlib/artifacts/DataPayments.json new file mode 100644 index 0000000000..a72afa0b8b --- /dev/null +++ b/evmlib/artifacts/DataPayments.json @@ -0,0 +1,90 @@ +{ + "_format": "hh-sol-artifact-1", + "contractName": "DataPayments", + "sourceName": "contracts/DataPayments.sol", + "abi": [ + { + "inputs": [ + { + "internalType": "address", + "name": "_tokenAddress", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "rewardsAddress", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "quoteHash", + "type": "bytes32" + } + ], + "name": "DataPaymentMade", + "type": "event" + }, + { + "inputs": [], + "name": "PAYMENT_TOKEN_ADDRESS", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "rewardsAddress", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "quoteHash", + "type": "bytes32" + } + ], + "internalType": "struct DataPayments.DataPayment[]", + "name": "dataPayments", + "type": "tuple[]" + } + ], + "name": "submitDataPayments", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } 
+ ], + "bytecode": "0x60a060405234801561001057600080fd5b506040516105f73803806105f783398101604081905261002f916100a6565b6001600160a01b0381166100955760405162461bcd60e51b8152602060048201526024808201527f546f6b656e20616464726573732063616e6e6f74206265207a65726f206164646044820152637265737360e01b606482015260840160405180910390fd5b6001600160a01b03166080526100d6565b6000602082840312156100b857600080fd5b81516001600160a01b03811681146100cf57600080fd5b9392505050565b6080516104f26101056000396000818160400152818161015101528181610253015261035301526104f26000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80635c0d32861461003b578063dee1dfa01461007e575b600080fd5b6100627f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009161008c3660046103c6565b610093565b005b60005b8181101561012b57368383838181106100b1576100b161043b565b6060029190910191506100d79050336100cd6020840184610451565b8360200135610130565b6040810135602082018035906100ed9084610451565b6001600160a01b03167ff998960b1c6f0e0e89b7bbe6b6fbf3e03e6f08eee5b8430877d8adb8e149d58060405160405180910390a450600101610096565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa15801561019a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101be9190610481565b101561021c5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b038316301461032557604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561029c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102c09190610481565b10156103255760405162461bcd60e51b81526020600482
0152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610213565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561039c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103c0919061049a565b50505050565b600080602083850312156103d957600080fd5b823567ffffffffffffffff808211156103f157600080fd5b818501915085601f83011261040557600080fd5b81358181111561041457600080fd5b86602060608302850101111561042957600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b60006020828403121561046357600080fd5b81356001600160a01b038116811461047a57600080fd5b9392505050565b60006020828403121561049357600080fd5b5051919050565b6000602082840312156104ac57600080fd5b8151801515811461047a57600080fdfea26469706673582212206f3a305284dc687832455d7d49b202dcf22b32d76aff5ccd14c3c8539596bcf464736f6c63430008180033", + "deployedBytecode": 
"0x608060405234801561001057600080fd5b50600436106100365760003560e01c80635c0d32861461003b578063dee1dfa01461007e575b600080fd5b6100627f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009161008c3660046103c6565b610093565b005b60005b8181101561012b57368383838181106100b1576100b161043b565b6060029190910191506100d79050336100cd6020840184610451565b8360200135610130565b6040810135602082018035906100ed9084610451565b6001600160a01b03167ff998960b1c6f0e0e89b7bbe6b6fbf3e03e6f08eee5b8430877d8adb8e149d58060405160405180910390a450600101610096565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa15801561019a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101be9190610481565b101561021c5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b038316301461032557604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561029c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102c09190610481565b10156103255760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610213565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561039c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103c0919061049a565b50505050565b600080602083850312156103d957600080fd5b823567ffffffffffffffff808
211156103f157600080fd5b818501915085601f83011261040557600080fd5b81358181111561041457600080fd5b86602060608302850101111561042957600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b60006020828403121561046357600080fd5b81356001600160a01b038116811461047a57600080fd5b9392505050565b60006020828403121561049357600080fd5b5051919050565b6000602082840312156104ac57600080fd5b8151801515811461047a57600080fdfea26469706673582212206f3a305284dc687832455d7d49b202dcf22b32d76aff5ccd14c3c8539596bcf464736f6c63430008180033", + "linkReferences": {}, + "deployedLinkReferences": {} +} \ No newline at end of file diff --git a/evmlib/src/common.rs b/evmlib/src/common.rs new file mode 100644 index 0000000000..af210f9285 --- /dev/null +++ b/evmlib/src/common.rs @@ -0,0 +1,18 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use alloy::primitives::FixedBytes; + +pub type Address = alloy::primitives::Address; +pub type Hash = FixedBytes<32>; +pub type TxHash = alloy::primitives::TxHash; +pub type U256 = alloy::primitives::U256; +pub type QuoteHash = Hash; +pub type Amount = U256; +pub type QuotePayment = (QuoteHash, Address, Amount); +pub type EthereumWallet = alloy::network::EthereumWallet; diff --git a/evmlib/src/contract/data_payments/error.rs b/evmlib/src/contract/data_payments/error.rs new file mode 100644 index 0000000000..95ec1c1c27 --- /dev/null +++ b/evmlib/src/contract/data_payments/error.rs @@ -0,0 +1,24 @@ +// Copyright 2024 MaidSafe.net limited. 
+// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::contract::network_token; +use alloy::transports::{RpcError, TransportErrorKind}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + ContractError(#[from] alloy::contract::Error), + #[error(transparent)] + RpcError(#[from] RpcError), + #[error(transparent)] + NetworkTokenError(#[from] network_token::Error), + #[error(transparent)] + PendingTransactionError(#[from] alloy::providers::PendingTransactionError), + #[error("The transfer limit of 256 has been exceeded")] + TransferLimitExceeded, +} diff --git a/evmlib/src/contract/data_payments/mod.rs b/evmlib/src/contract/data_payments/mod.rs new file mode 100644 index 0000000000..352f294581 --- /dev/null +++ b/evmlib/src/contract/data_payments/mod.rs @@ -0,0 +1,99 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +pub mod error; + +use crate::common; +use crate::common::{Address, TxHash}; +use crate::contract::data_payments::error::Error; +use crate::contract::data_payments::DataPaymentsContract::DataPaymentsContractInstance; +use alloy::providers::{Network, Provider}; +use alloy::sol; +use alloy::transports::Transport; + +/// The max amount of transfers within one data payments transaction. +pub const MAX_TRANSFERS_PER_TRANSACTION: usize = 512; + +sol!( + #[allow(clippy::too_many_arguments)] + #[allow(missing_docs)] + #[sol(rpc)] + DataPaymentsContract, + "artifacts/DataPayments.json" +); + +pub struct DataPaymentsHandler, N: Network> { + pub contract: DataPaymentsContractInstance, +} + +impl DataPaymentsHandler +where + T: Transport + Clone, + P: Provider, + N: Network, +{ + /// Create a new ChunkPayments contract instance. + pub fn new(contract_address: Address, provider: P) -> Self { + let contract = DataPaymentsContract::new(contract_address, provider); + DataPaymentsHandler { contract } + } + + /// Deploys the ChunkPayments smart contract to the network of the provider. + /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING! + pub async fn deploy(provider: P, payment_token_address: Address) -> Self { + let contract = DataPaymentsContract::deploy(provider, payment_token_address) + .await + .expect("Could not deploy contract"); + + DataPaymentsHandler { contract } + } + + pub fn set_provider(&mut self, provider: P) { + let address = *self.contract.address(); + self.contract = DataPaymentsContract::new(address, provider); + } + + /// Pay for quotes. + /// Input: (quote_id, reward_address, amount). 
+ pub async fn pay_for_quotes>( + &self, + data_payments: I, + ) -> Result { + let data_payments: Vec = data_payments + .into_iter() + .map(|(hash, addr, amount)| DataPayments::DataPayment { + rewardsAddress: addr, + amount, + quoteHash: hash, + }) + .collect(); + + if data_payments.len() > MAX_TRANSFERS_PER_TRANSACTION { + error!( + "Data payments limit exceeded: {} > {}", + data_payments.len(), + MAX_TRANSFERS_PER_TRANSACTION + ); + return Err(Error::TransferLimitExceeded); + } + + let tx_hash = self + .contract + .submitDataPayments(data_payments) + .send() + .await + .inspect_err(|e| error!("Failed to submit data payments during pay_for_quotes: {e:?}"))? + .watch() + .await + .inspect_err(|e| { + error!("Failed to watch data payments during pay_for_quotes: {e:?}") + })?; + + Ok(tx_hash) + } +} diff --git a/evmlib/src/contract/mod.rs b/evmlib/src/contract/mod.rs new file mode 100644 index 0000000000..d428880800 --- /dev/null +++ b/evmlib/src/contract/mod.rs @@ -0,0 +1,10 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +pub mod data_payments; +pub mod network_token; diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs new file mode 100644 index 0000000000..ce582f2543 --- /dev/null +++ b/evmlib/src/contract/network_token.rs @@ -0,0 +1,132 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::common::{Address, TxHash, U256}; +use crate::contract::network_token::NetworkTokenContract::NetworkTokenContractInstance; +use alloy::providers::{Network, Provider}; +use alloy::sol; +use alloy::transports::{RpcError, Transport, TransportErrorKind}; + +sol!( + #[allow(clippy::too_many_arguments)] + #[allow(missing_docs)] + #[sol(rpc)] + NetworkTokenContract, + "artifacts/AutonomiNetworkToken.json" +); + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + ContractError(#[from] alloy::contract::Error), + #[error(transparent)] + RpcError(#[from] RpcError), + #[error(transparent)] + PendingTransactionError(#[from] alloy::providers::PendingTransactionError), +} + +pub struct NetworkToken, N: Network> { + pub contract: NetworkTokenContractInstance, +} + +impl NetworkToken +where + T: Transport + Clone, + P: Provider, + N: Network, +{ + /// Create a new NetworkToken contract instance. + pub fn new(contract_address: Address, provider: P) -> Self { + let contract = NetworkTokenContract::new(contract_address, provider); + NetworkToken { contract } + } + + /// Deploys the AutonomiNetworkToken smart contract to the network of the provider. + /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING! 
+ pub async fn deploy(provider: P) -> Self { + let contract = NetworkTokenContract::deploy(provider) + .await + .expect("Could not deploy contract"); + NetworkToken { contract } + } + + pub fn set_provider(&mut self, provider: P) { + let address = *self.contract.address(); + self.contract = NetworkTokenContract::new(address, provider); + } + + /// Get the raw token balance of an address. + pub async fn balance_of(&self, account: Address) -> Result { + debug!("Getting balance of account: {account:?}"); + let balance = self + .contract + .balanceOf(account) + .call() + .await + .inspect_err(|err| error!("Error getting balance of account: {err:?}"))? + ._0; + debug!("Balance of account: {account} is {balance}"); + Ok(balance) + } + + /// See how many tokens are approved to be spent. + pub async fn allowance(&self, owner: Address, spender: Address) -> Result { + debug!("Getting allowance of owner: {owner} for spender: {spender}",); + let balance = self + .contract + .allowance(owner, spender) + .call() + .await + .inspect_err(|err| error!("Error getting allowance: {err:?}"))? + ._0; + debug!("Allowance of owner: {owner} for spender: {spender} is: {balance}"); + Ok(balance) + } + + /// Approve spender to spend a raw amount of tokens. 
+ pub async fn approve(&self, spender: Address, value: U256) -> Result { + debug!("Approving spender to spend raw amt of tokens: {value}"); + let call = self.contract.approve(spender, value); + let pending_tx_builder = call.send().await.inspect_err(|err| { + error!( + "Error approving spender {spender:?} to spend raw amt of tokens {value}: {err:?}" + ) + })?; + + let pending_tx_hash = *pending_tx_builder.tx_hash(); + debug!("The approval from sender {spender:?} is pending with tx_hash: {pending_tx_hash:?}",); + let tx_hash = pending_tx_builder.watch().await.inspect_err(|err| { + error!("Error watching approve tx with hash {pending_tx_hash:?}: {err:?}") + })?; + + debug!("Approve tx with hash {tx_hash:?} is successful"); + + Ok(tx_hash) + } + + /// Transfer a raw amount of tokens. + pub async fn transfer(&self, receiver: Address, amount: U256) -> Result { + debug!("Transferring raw amt of tokens: {amount} to {receiver:?}"); + let call = self.contract.transfer(receiver, amount); + let pending_tx_builder = call.send().await.inspect_err(|err| { + error!("Error transferring raw amt of tokens to {receiver:?}: {err:?}") + })?; + + let pending_tx_hash = *pending_tx_builder.tx_hash(); + debug!( + "The transfer to receiver {receiver:?} is pending with tx_hash: {pending_tx_hash:?}" + ); + let tx_hash = pending_tx_builder.watch().await.inspect_err(|err| { + error!("Error watching transfer tx with hash {pending_tx_hash:?}: {err:?}") + })?; + + debug!("Transfer tx with hash {tx_hash:?} is successful"); + + Ok(tx_hash) + } +} diff --git a/sn_cli/src/lib.rs b/evmlib/src/cryptography.rs similarity index 67% rename from sn_cli/src/lib.rs rename to evmlib/src/cryptography.rs index 4d0e77b41e..ddc0149b43 100644 --- a/sn_cli/src/lib.rs +++ b/evmlib/src/cryptography.rs @@ -6,12 +6,10 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-mod acc_packet; -mod files; -pub mod utils; +use crate::common::Hash; +use alloy::primitives::keccak256; -pub use acc_packet::AccountPacket; -pub use files::{ - download_file, download_files, ChunkManager, Estimator, FilesUploadStatusNotifier, - FilesUploadSummary, FilesUploader, UploadedFile, UPLOADED_FILES, -}; +/// Hash data using Keccak256. +pub fn hash>(data: T) -> Hash { + keccak256(data.as_ref()) +} diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs new file mode 100644 index 0000000000..5cdda3d91e --- /dev/null +++ b/evmlib/src/event.rs @@ -0,0 +1,71 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::common::{Address, Hash, U256}; +use alloy::primitives::{b256, FixedBytes}; +use alloy::rpc::types::Log; + +// Should be updated when the smart contract changes! +pub(crate) const DATA_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> = + b256!("f998960b1c6f0e0e89b7bbe6b6fbf3e03e6f08eee5b8430877d8adb8e149d580"); // DevSkim: ignore DS173237 + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Topics amount is unexpected. Was expecting 4")] + TopicsAmountUnexpected, + #[error("Event signature is missing")] + EventSignatureMissing, + #[error("Event signature does not match")] + EventSignatureDoesNotMatch, +} + +/// Struct for the ChunkPaymentEvent emitted by the ChunkPayments smart contract. 
+#[derive(Debug)] +pub(crate) struct ChunkPaymentEvent { + pub rewards_address: Address, + pub amount: U256, + pub quote_hash: Hash, +} + +impl TryFrom for ChunkPaymentEvent { + type Error = Error; + + fn try_from(log: Log) -> Result { + // Verify the amount of topics + if log.topics().len() != 4 { + error!("Topics amount is unexpected. Was expecting 4"); + return Err(Error::TopicsAmountUnexpected); + } + + let topic0 = log + .topics() + .first() + .ok_or(Error::EventSignatureMissing) + .inspect_err(|_| error!("Event signature is missing"))?; + + // Verify the event signature + if topic0 != &DATA_PAYMENT_EVENT_SIGNATURE { + error!( + "Event signature does not match. Expected: {:?}, got: {:?}", + DATA_PAYMENT_EVENT_SIGNATURE, topic0 + ); + return Err(Error::EventSignatureDoesNotMatch); + } + + // Extract the data + let rewards_address = Address::from_slice(&log.topics()[1][12..]); + let amount = U256::from_be_slice(&log.topics()[2][12..]); + let quote_hash = Hash::from_slice(log.topics()[3].as_slice()); + + Ok(Self { + rewards_address, + amount, + quote_hash, + }) + } +} diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs new file mode 100644 index 0000000000..fe712e1b27 --- /dev/null +++ b/evmlib/src/lib.rs @@ -0,0 +1,153 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::common::{Address, QuoteHash, TxHash, U256}; +use crate::transaction::verify_data_payment; +use alloy::primitives::address; +use alloy::transports::http::reqwest; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; +use std::str::FromStr; +use std::sync::LazyLock; + +#[macro_use] +extern crate tracing; + +pub mod common; +pub mod contract; +pub mod cryptography; +pub(crate) mod event; +pub mod testnet; +pub mod transaction; +pub mod utils; +pub mod wallet; + +static PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL: LazyLock = LazyLock::new(|| { + "https://arb1.arbitrum.io/rpc" + .parse() + .expect("Invalid RPC URL") +}); + +static PUBLIC_ARBITRUM_SEPOLIA_HTTP_RPC_URL: LazyLock = LazyLock::new(|| { + "https://sepolia-rollup.arbitrum.io/rpc" + .parse() + .expect("Invalid RPC URL") +}); + +const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address = + address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C"); + +const ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS: Address = + address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C"); + +// Should be updated when the smart contract changes! 
+const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address = + address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe"); + +const ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS: Address = + address!("e6D6bB5Fa796baA8c1ADc439Ac0fd66fd2A1858b"); + +#[serde_as] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct CustomNetwork { + #[serde_as(as = "DisplayFromStr")] + pub rpc_url_http: reqwest::Url, + pub payment_token_address: Address, + pub data_payments_address: Address, +} + +impl CustomNetwork { + fn new(rpc_url: &str, payment_token_addr: &str, data_payments_addr: &str) -> Self { + Self { + rpc_url_http: reqwest::Url::parse(rpc_url).expect("Invalid RPC URL"), + payment_token_address: Address::from_str(payment_token_addr) + .expect("Invalid payment token address"), + data_payments_address: Address::from_str(data_payments_addr) + .expect("Invalid chunk payments address"), + } + } +} + +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] +pub enum Network { + #[default] + ArbitrumOne, + ArbitrumSepolia, + Custom(CustomNetwork), +} + +impl std::fmt::Display for Network { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Network::ArbitrumOne => write!(f, "evm-arbitrum-one"), + Network::ArbitrumSepolia => write!(f, "evm-arbitrum-sepolia"), + Network::Custom(_) => write!(f, "evm-custom"), + } + } +} + +impl Network { + pub fn new_custom(rpc_url: &str, payment_token_addr: &str, chunk_payments_addr: &str) -> Self { + Self::Custom(CustomNetwork::new( + rpc_url, + payment_token_addr, + chunk_payments_addr, + )) + } + + pub fn identifier(&self) -> &str { + match self { + Network::ArbitrumOne => "arbitrum-one", + Network::ArbitrumSepolia => "arbitrum-sepolia", + Network::Custom(_) => "custom", + } + } + + pub fn rpc_url(&self) -> &reqwest::Url { + match self { + Network::ArbitrumOne => &PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL, + Network::ArbitrumSepolia => &PUBLIC_ARBITRUM_SEPOLIA_HTTP_RPC_URL, + Network::Custom(custom) => 
&custom.rpc_url_http, + } + } + + pub fn payment_token_address(&self) -> &Address { + match self { + Network::ArbitrumOne => &ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS, + Network::ArbitrumSepolia => &ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS, + Network::Custom(custom) => &custom.payment_token_address, + } + } + + pub fn data_payments_address(&self) -> &Address { + match self { + Network::ArbitrumOne => &ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS, + Network::ArbitrumSepolia => &ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS, + Network::Custom(custom) => &custom.data_payments_address, + } + } + + pub async fn verify_data_payment( + &self, + tx_hash: TxHash, + quote_hash: QuoteHash, + reward_addr: Address, + amount: U256, + quote_expiration_timestamp_in_secs: u64, + ) -> Result<(), transaction::Error> { + verify_data_payment( + self, + tx_hash, + quote_hash, + reward_addr, + amount, + quote_expiration_timestamp_in_secs, + ) + .await + } +} diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs new file mode 100644 index 0000000000..e5f1f79708 --- /dev/null +++ b/evmlib/src/testnet.rs @@ -0,0 +1,150 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::common::Address; +use crate::contract::data_payments::DataPaymentsHandler; +use crate::contract::network_token::NetworkToken; +use crate::reqwest::Url; +use crate::{CustomNetwork, Network}; +use alloy::hex::ToHexExt; +use alloy::network::{Ethereum, EthereumWallet}; +use alloy::node_bindings::{Anvil, AnvilInstance}; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider}; +use alloy::signers::local::PrivateKeySigner; +use alloy::transports::http::{Client, Http}; + +pub struct Testnet { + anvil: AnvilInstance, + rpc_url: Url, + network_token_address: Address, + data_payments_address: Address, +} + +impl Testnet { + /// Starts an Anvil node and automatically deploys the network token and chunk payments smart contracts. + pub async fn new() -> Self { + let (anvil, rpc_url) = start_node(); + + let network_token = deploy_network_token_contract(&rpc_url, &anvil).await; + let data_payments = + deploy_data_payments_contract(&rpc_url, &anvil, *network_token.contract.address()) + .await; + + Testnet { + anvil, + rpc_url, + network_token_address: *network_token.contract.address(), + data_payments_address: *data_payments.contract.address(), + } + } + + pub fn to_network(&self) -> Network { + Network::Custom(CustomNetwork { + rpc_url_http: self.rpc_url.clone(), + payment_token_address: self.network_token_address, + data_payments_address: self.data_payments_address, + }) + } + + pub fn default_wallet_private_key(&self) -> String { + // Fetches private key from the first default Anvil account (Alice). + let signer: PrivateKeySigner = self.anvil.keys()[0].clone().into(); + signer.to_bytes().encode_hex_with_prefix() + } +} + +/// Runs a local Anvil node bound to a specified IP address. 
+/// +/// The `AnvilInstance` `endpoint` function is hardcoded to return "localhost", so we must also +/// return the RPC URL if we want to listen on a different address. +/// +/// The `anvil` binary respects the `ANVIL_IP_ADDR` environment variable, but defaults to "localhost". +pub fn start_node() -> (AnvilInstance, Url) { + let host = std::env::var("ANVIL_IP_ADDR").unwrap_or_else(|_| "localhost".to_string()); + let port = std::env::var("ANVIL_PORT") + .unwrap_or_else(|_| "4343".to_string()) + .parse::() + .expect("Invalid port number"); + + let anvil = Anvil::new() + .port(port) + .try_spawn() + .expect("Could not spawn Anvil node"); + + let url = Url::parse(&format!("http://{host}:{port}")).expect("Failed to parse URL"); + + (anvil, url) +} + +pub async fn deploy_network_token_contract( + rpc_url: &Url, + anvil: &AnvilInstance, +) -> NetworkToken< + Http, + FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, + >, + Ethereum, +> { + // Set up signer from the first default Anvil account (Alice). + let signer: PrivateKeySigner = anvil.keys()[0].clone().into(); + let wallet = EthereumWallet::from(signer); + + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(wallet) + .on_http(rpc_url.clone()); + + // Deploy the contract. + NetworkToken::deploy(provider).await +} + +pub async fn deploy_data_payments_contract( + rpc_url: &Url, + anvil: &AnvilInstance, + token_address: Address, +) -> DataPaymentsHandler< + Http, + FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, + >, + Ethereum, +> { + // Set up signer from the second default Anvil account (Bob). 
+ let signer: PrivateKeySigner = anvil.keys()[1].clone().into(); + let wallet = EthereumWallet::from(signer); + + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(wallet) + .on_http(rpc_url.clone()); + + // Deploy the contract. + DataPaymentsHandler::deploy(provider, token_address).await +} diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs new file mode 100644 index 0000000000..dc8609a4d5 --- /dev/null +++ b/evmlib/src/transaction.rs @@ -0,0 +1,220 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::common::{Address, QuoteHash, TxHash, U256}; +use crate::event::{ChunkPaymentEvent, DATA_PAYMENT_EVENT_SIGNATURE}; +use crate::Network; +use alloy::eips::BlockNumberOrTag; +use alloy::primitives::FixedBytes; +use alloy::providers::{Provider, ProviderBuilder}; +use alloy::rpc::types::{Block, Filter, Log, TransactionReceipt}; +use alloy::transports::{RpcError, TransportErrorKind}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + RpcError(#[from] RpcError), + #[error("Transaction is not confirmed")] + TransactionUnconfirmed, + #[error("Transaction was not found")] + TransactionNotFound, + #[error("Transaction has not been included in a block yet")] + TransactionNotInBlock, + #[error("Block was not found")] + BlockNotFound, + #[error("No event proof found")] + EventProofNotFound, + #[error("Payment was done after the quote expired")] + QuoteExpired, +} + +/// Get a transaction receipt by its hash. 
+pub async fn get_transaction_receipt_by_hash( + network: &Network, + transaction_hash: TxHash, +) -> Result, Error> { + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .on_http(network.rpc_url().clone()); + let maybe_receipt = provider + .get_transaction_receipt(transaction_hash) + .await + .inspect_err(|err| error!("Error getting transaction receipt for transaction_hash: {transaction_hash:?} : {err:?}", ))?; + Ok(maybe_receipt) +} + +/// Get a block by its block number. +async fn get_block_by_number(network: &Network, block_number: u64) -> Result, Error> { + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .on_http(network.rpc_url().clone()); + let block = provider + .get_block_by_number(BlockNumberOrTag::Number(block_number), true) + .await + .inspect_err(|err| error!("Error getting block by number for {block_number} : {err:?}",))?; + Ok(block) +} + +/// Get transaction logs using a filter. +async fn get_transaction_logs(network: &Network, filter: Filter) -> Result, Error> { + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .on_http(network.rpc_url().clone()); + let logs = provider + .get_logs(&filter) + .await + .inspect_err(|err| error!("Error getting logs for filter: {filter:?} : {err:?}"))?; + Ok(logs) +} + +/// Get a DataPaymentMade event, filtered by a hashed chunk address and a node address. +/// Useful for a node if it wants to check if payment for a certain chunk has been made. 
+async fn get_data_payment_event( + network: &Network, + block_number: u64, + quote_hash: QuoteHash, + reward_addr: Address, + amount: U256, +) -> Result, Error> { + debug!( + "Getting data payment event for quote_hash: {quote_hash:?}, reward_addr: {reward_addr:?}" + ); + let topic1: FixedBytes<32> = FixedBytes::left_padding_from(reward_addr.as_slice()); + + let filter = Filter::new() + .event_signature(DATA_PAYMENT_EVENT_SIGNATURE) + .topic1(topic1) + .topic2(amount) + .topic3(quote_hash) + .from_block(block_number) + .to_block(block_number); + + get_transaction_logs(network, filter).await +} + +/// Verify if a data payment is confirmed. +pub async fn verify_data_payment( + network: &Network, + tx_hash: TxHash, + quote_hash: QuoteHash, + reward_addr: Address, + amount: U256, + quote_expiration_timestamp_in_secs: u64, +) -> Result<(), Error> { + debug!("Verifying data payment for tx_hash: {tx_hash:?}"); + let transaction = get_transaction_receipt_by_hash(network, tx_hash) + .await? + .ok_or(Error::TransactionNotFound)?; + + // If the status is True, it means the tx is confirmed. + if !transaction.status() { + error!("Transaction {tx_hash:?} is not confirmed"); + return Err(Error::TransactionUnconfirmed); + } + + let block_number = transaction + .block_number + .ok_or(Error::TransactionNotInBlock) + .inspect_err(|_| error!("Transaction {tx_hash:?} has not been included in a block yet"))?; + + let block = get_block_by_number(network, block_number) + .await? + .ok_or(Error::BlockNotFound)?; + + // Check if payment was done within the quote expiration timeframe. + if quote_expiration_timestamp_in_secs < block.header.timestamp { + error!("Payment for tx_hash: {tx_hash:?} was done after the quote expired"); + return Err(Error::QuoteExpired); + } + + let logs = + get_data_payment_event(network, block_number, quote_hash, reward_addr, amount).await?; + + for log in logs { + if log.transaction_hash != Some(tx_hash) { + // Wrong transaction. 
+ continue; + } + + if let Ok(event) = ChunkPaymentEvent::try_from(log) { + // Check if the event matches what we expect. + if event.quote_hash == quote_hash + && event.rewards_address == reward_addr + && event.amount >= amount + { + return Ok(()); + } + } + } + + error!("No event proof found for tx_hash: {tx_hash:?}"); + + Err(Error::EventProofNotFound) +} + +#[cfg(test)] +mod tests { + use crate::common::{Address, U256}; + use crate::transaction::{ + get_data_payment_event, get_transaction_receipt_by_hash, verify_data_payment, + }; + use crate::Network; + use alloy::hex::FromHex; + use alloy::primitives::b256; + + #[tokio::test] + async fn test_get_transaction_receipt_by_hash() { + let network = Network::ArbitrumOne; + + let tx_hash = b256!("3304465f38fa0bd9670a426108dd1ddd193e059dcb7c13982d31424646217a36"); // DevSkim: ignore DS173237 + + assert!(get_transaction_receipt_by_hash(&network, tx_hash) + .await + .unwrap() + .is_some()); + } + + #[tokio::test] + async fn test_get_data_payment_event() { + let network = Network::ArbitrumOne; + + let block_number: u64 = 260246302; + let reward_address = Address::from_hex("8AB15A43305854e4AE4E6FBEa0CD1CC0AB4ecB2A").unwrap(); // DevSkim: ignore DS173237 + let amount = U256::from(1); + let quote_hash = b256!("EBD943C38C0422901D4CF22E677DD95F2591CA8D6EBFEA8BAF1BFE9FF5506ECE"); // DevSkim: ignore DS173237 + + let logs = + get_data_payment_event(&network, block_number, quote_hash, reward_address, amount) + .await + .unwrap(); + + assert_eq!(logs.len(), 1); + } + + #[tokio::test] + async fn test_verify_data_payment() { + let network = Network::ArbitrumOne; + + let tx_hash = b256!("3304465f38fa0bd9670a426108dd1ddd193e059dcb7c13982d31424646217a36"); // DevSkim: ignore DS173237 + let quote_hash = b256!("EBD943C38C0422901D4CF22E677DD95F2591CA8D6EBFEA8BAF1BFE9FF5506ECE"); // DevSkim: ignore DS173237 + let reward_address = Address::from_hex("8AB15A43305854e4AE4E6FBEa0CD1CC0AB4ecB2A").unwrap(); // DevSkim: ignore DS173237 + let amount 
= U256::from(1); + + let result = verify_data_payment( + &network, + tx_hash, + quote_hash, + reward_address, + amount, + 4102441200, + ) + .await; + + assert!(result.is_ok(), "Error: {:?}", result.err()); + } +} diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs new file mode 100644 index 0000000000..e6f657938b --- /dev/null +++ b/evmlib/src/utils.rs @@ -0,0 +1,145 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +#![allow(dead_code)] + +use crate::common::{Address, Hash}; +use crate::{CustomNetwork, Network}; +use dirs_next::data_dir; +use rand::Rng; +use std::env; +use std::path::PathBuf; + +pub const EVM_TESTNET_CSV_FILENAME: &str = "evm_testnet_data.csv"; + +/// environment variable to connect to a custom EVM network +pub const RPC_URL: &str = "RPC_URL"; +const RPC_URL_BUILD_TIME_VAL: Option<&str> = option_env!("RPC_URL"); +pub const PAYMENT_TOKEN_ADDRESS: &str = "PAYMENT_TOKEN_ADDRESS"; +const PAYMENT_TOKEN_ADDRESS_BUILD_TIME_VAL: Option<&str> = option_env!("PAYMENT_TOKEN_ADDRESS"); +pub const DATA_PAYMENTS_ADDRESS: &str = "DATA_PAYMENTS_ADDRESS"; +const DATA_PAYMENTS_ADDRESS_BUILD_TIME_VAL: Option<&str> = option_env!("DATA_PAYMENTS_ADDRESS"); + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to get EVM network: {0}")] + FailedToGetEvmNetwork(String), +} + +/// Generate a random Address. +pub fn dummy_address() -> Address { + Address::new(rand::rngs::OsRng.gen()) +} + +/// Generate a random Hash. 
+pub fn dummy_hash() -> Hash { + Hash::new(rand::rngs::OsRng.gen()) +} + +pub fn get_evm_testnet_csv_path() -> Result { + let file = data_dir() + .ok_or(Error::FailedToGetEvmNetwork( + "failed to get data dir when fetching evm testnet CSV file".to_string(), + ))? + .join("safe") + .join(EVM_TESTNET_CSV_FILENAME); + Ok(file) +} + +/// Get the `Network` from environment variables +/// Returns an error if we cannot obtain the network from any means. +pub fn get_evm_network_from_env() -> Result { + let evm_vars = [ + env::var(RPC_URL) + .ok() + .or_else(|| RPC_URL_BUILD_TIME_VAL.map(|s| s.to_string())), + env::var(PAYMENT_TOKEN_ADDRESS) + .ok() + .or_else(|| PAYMENT_TOKEN_ADDRESS_BUILD_TIME_VAL.map(|s| s.to_string())), + env::var(DATA_PAYMENTS_ADDRESS) + .ok() + .or_else(|| DATA_PAYMENTS_ADDRESS_BUILD_TIME_VAL.map(|s| s.to_string())), + ] + .into_iter() + .map(|var| { + var.ok_or(Error::FailedToGetEvmNetwork(format!( + "missing env var, make sure to set all of: {RPC_URL}, {PAYMENT_TOKEN_ADDRESS}, {DATA_PAYMENTS_ADDRESS}" + ))) + }) + .collect::, Error>>(); + + let mut use_local_evm = std::env::var("EVM_NETWORK") + .map(|v| v == "local") + .unwrap_or(false); + if use_local_evm { + info!("Using local EVM network as EVM_NETWORK is set to 'local'"); + } + if cfg!(feature = "local") { + use_local_evm = true; + info!("Using local EVM network as 'local' feature flag is enabled"); + } + + let use_arbitrum_one = std::env::var("EVM_NETWORK") + .map(|v| v == "arbitrum-one") + .unwrap_or(false); + + let use_arbitrum_sepolia = std::env::var("EVM_NETWORK") + .map(|v| v == "arbitrum-sepolia") + .unwrap_or(false); + + if use_local_evm { + local_evm_network_from_csv() + } else if use_arbitrum_one { + info!("Using Arbitrum One EVM network as EVM_NETWORK is set to 'arbitrum-one'"); + Ok(Network::ArbitrumOne) + } else if use_arbitrum_sepolia { + info!("Using Arbitrum Sepolia EVM network as EVM_NETWORK is set to 'arbitrum-sepolia'"); + Ok(Network::ArbitrumSepolia) + } else if let 
Ok(evm_vars) = evm_vars { + info!("Using custom EVM network from environment variables"); + Ok(Network::Custom(CustomNetwork::new( + &evm_vars[0], + &evm_vars[1], + &evm_vars[2], + ))) + } else { + error!("Failed to obtain EVM Network through any means"); + Err(Error::FailedToGetEvmNetwork( + "Failed to obtain EVM Network through any means".to_string(), + )) + } +} + +/// Get the `Network::Custom` from the local EVM testnet CSV file +fn local_evm_network_from_csv() -> Result { + // load the csv + let csv_path = get_evm_testnet_csv_path()?; + + if !csv_path.exists() { + error!("evm data csv path does not exist {:?}", csv_path); + return Err(Error::FailedToGetEvmNetwork(format!( + "evm data csv path does not exist {csv_path:?}" + ))); + } + + let csv = std::fs::read_to_string(&csv_path).map_err(|_| { + Error::FailedToGetEvmNetwork(format!("failed to read evm testnet CSV file {csv_path:?}")) + })?; + let parts: Vec<&str> = csv.split(',').collect(); + match parts.as_slice() { + [rpc_url, payment_token_address, chunk_payments_address, _] => Ok(Network::Custom( + CustomNetwork::new(rpc_url, payment_token_address, chunk_payments_address), + )), + _ => { + error!("Invalid data in evm testnet CSV file"); + Err(Error::FailedToGetEvmNetwork( + "invalid data in evm testnet CSV file".to_string(), + )) + } + } +} diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs new file mode 100644 index 0000000000..9fa3c92ce1 --- /dev/null +++ b/evmlib/src/wallet.rs @@ -0,0 +1,380 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256}; +use crate::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; +use crate::contract::network_token::NetworkToken; +use crate::contract::{data_payments, network_token}; +use crate::Network; +use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, Provider, ProviderBuilder, ReqwestProvider}; +use alloy::rpc::types::TransactionRequest; +use alloy::signers::local::{LocalSigner, PrivateKeySigner}; +use alloy::transports::http::{reqwest, Client, Http}; +use alloy::transports::{RpcError, TransportErrorKind}; +use std::collections::BTreeMap; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Private key is invalid")] + PrivateKeyInvalid, + #[error(transparent)] + RpcError(#[from] RpcError), + #[error("Network token contract error: {0}")] + NetworkTokenContract(#[from] network_token::Error), + #[error("Chunk payments contract error: {0}")] + ChunkPaymentsContract(#[from] data_payments::error::Error), +} + +#[derive(Clone)] +pub struct Wallet { + wallet: EthereumWallet, + network: Network, +} + +impl Wallet { + /// Creates a new Wallet object with the specific Network and EthereumWallet. + pub fn new(network: Network, wallet: EthereumWallet) -> Self { + Self { wallet, network } + } + + /// Convenience function that creates a new Wallet with a random EthereumWallet. + pub fn new_with_random_wallet(network: Network) -> Self { + Self::new(network, random()) + } + + /// Creates a new Wallet based on the given private_key. It will fail with Error::PrivateKeyInvalid if private_key is invalid. 
+ pub fn new_from_private_key(network: Network, private_key: &str) -> Result { + let wallet = from_private_key(private_key)?; + Ok(Self::new(network, wallet)) + } + + /// Returns the address of this wallet. + pub fn address(&self) -> Address { + wallet_address(&self.wallet) + } + + /// Returns the raw balance of payment tokens for this wallet. + pub async fn balance_of_tokens(&self) -> Result { + balance_of_tokens(self.address(), &self.network).await + } + + /// Returns the raw balance of gas tokens for this wallet. + pub async fn balance_of_gas_tokens(&self) -> Result { + balance_of_gas_tokens(self.address(), &self.network).await + } + + /// Transfer a raw amount of payment tokens to another address. + pub async fn transfer_tokens( + &self, + to: Address, + amount: U256, + ) -> Result { + transfer_tokens(self.wallet.clone(), &self.network, to, amount).await + } + + /// Transfer a raw amount of gas tokens to another address. + pub async fn transfer_gas_tokens( + &self, + to: Address, + amount: U256, + ) -> Result { + transfer_gas_tokens(self.wallet.clone(), &self.network, to, amount).await + } + + /// See how many tokens of the owner may be spent by the spender. + pub async fn token_allowance(&self, spender: Address) -> Result { + token_allowance(&self.network, self.address(), spender).await + } + + /// Approve an address / smart contract to spend this wallet's payment tokens. + pub async fn approve_to_spend_tokens( + &self, + spender: Address, + amount: U256, + ) -> Result { + approve_to_spend_tokens(self.wallet.clone(), &self.network, spender, amount).await + } + + /// Pays for a single quote. Returns transaction hash of the payment. + pub async fn pay_for_quote( + &self, + quote_hash: QuoteHash, + rewards_addr: Address, + amount: U256, + ) -> Result { + self.pay_for_quotes([(quote_hash, rewards_addr, amount)]) + .await + .map(|v| v.values().last().cloned().expect("Infallible")) + .map_err(|err| err.0) + } + + /// Function for batch payments of quotes. 
It accepts an iterator of QuotePayment and returns + /// transaction hashes of the payments by quotes. + pub async fn pay_for_quotes>( + &self, + data_payments: I, + ) -> Result, PayForQuotesError> { + pay_for_quotes(self.wallet.clone(), &self.network, data_payments).await + } +} + +/// Generate an EthereumWallet with a random private key. +fn random() -> EthereumWallet { + let signer: PrivateKeySigner = LocalSigner::random(); + EthereumWallet::from(signer) +} + +/// Creates a wallet from a private key in HEX format. +fn from_private_key(private_key: &str) -> Result { + let signer: PrivateKeySigner = private_key.parse().map_err(|err| { + error!("Error parsing private key: {err}"); + Error::PrivateKeyInvalid + })?; + Ok(EthereumWallet::from(signer)) +} + +// TODO(optimization): Find a way to reuse/persist contracts and/or a provider without the wallet nonce going out of sync + +#[allow(clippy::type_complexity)] +fn http_provider( + rpc_url: reqwest::Url, +) -> FillProvider< + JoinFill< + Identity, + JoinFill>>, + >, + ReqwestProvider, + Http, + Ethereum, +> { + ProviderBuilder::new() + .with_recommended_fillers() + .on_http(rpc_url) +} + +#[allow(clippy::type_complexity)] +fn http_provider_with_wallet( + rpc_url: reqwest::Url, + wallet: EthereumWallet, +) -> FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, +> { + ProviderBuilder::new() + .with_recommended_fillers() + .wallet(wallet) + .on_http(rpc_url) +} + +/// Returns the address of this wallet. +pub fn wallet_address(wallet: &EthereumWallet) -> Address { + >::default_signer_address(wallet) +} + +/// Returns the raw balance of payment tokens for this wallet. 
+pub async fn balance_of_tokens( + account: Address, + network: &Network, +) -> Result { + info!("Getting balance of tokens for account: {account}"); + let provider = http_provider(network.rpc_url().clone()); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.balance_of(account).await +} + +/// Returns the raw balance of gas tokens for this wallet. +pub async fn balance_of_gas_tokens( + account: Address, + network: &Network, +) -> Result { + debug!("Getting balance of gas tokens for account: {account}"); + let provider = http_provider(network.rpc_url().clone()); + let balance = provider.get_balance(account).await?; + Ok(balance) +} + +/// See how many tokens of the owner may be spent by the spender. +pub async fn token_allowance( + network: &Network, + owner: Address, + spender: Address, +) -> Result { + debug!("Getting allowance for owner: {owner} and spender: {spender}",); + let provider = http_provider(network.rpc_url().clone()); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.allowance(owner, spender).await +} + +/// Approve an address / smart contract to spend this wallet's payment tokens. +pub async fn approve_to_spend_tokens( + wallet: EthereumWallet, + network: &Network, + spender: Address, + amount: U256, +) -> Result { + debug!("Approving address/smart contract with {amount} tokens at address: {spender}",); + let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.approve(spender, amount).await +} + +/// Transfer payment tokens from the supplied wallet to an address. 
+pub async fn transfer_tokens( + wallet: EthereumWallet, + network: &Network, + receiver: Address, + amount: U256, +) -> Result { + debug!("Transferring {amount} tokens to {receiver}"); + let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.transfer(receiver, amount).await +} + +/// Transfer native/gas tokens from the supplied wallet to an address. +pub async fn transfer_gas_tokens( + wallet: EthereumWallet, + network: &Network, + receiver: Address, + amount: U256, +) -> Result { + debug!("Transferring {amount} gas tokens to {receiver}"); + let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); + let tx = TransactionRequest::default() + .with_to(receiver) + .with_value(amount); + + let tx_hash = provider.send_transaction(tx).await?.watch().await?; + + Ok(tx_hash) +} + +/// Contains the payment error and the already succeeded batch payments (if any). +#[derive(Debug)] +pub struct PayForQuotesError(pub Error, pub BTreeMap); + +/// Use this wallet to pay for chunks in batched transfer transactions. +/// If the amount of transfers is more than one transaction can contain, the transfers will be split up over multiple transactions. +pub async fn pay_for_quotes>( + wallet: EthereumWallet, + network: &Network, + payments: T, +) -> Result, PayForQuotesError> { + let payments: Vec<_> = payments.into_iter().collect(); + info!("Paying for quotes of len: {}", payments.len()); + + let total_amount = payments.iter().map(|(_, _, amount)| amount).sum(); + + let mut tx_hashes_by_quote = BTreeMap::new(); + + // Check allowance + let allowance = token_allowance( + network, + wallet_address(&wallet), + *network.data_payments_address(), + ) + .await + .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + + // TODO: Get rid of approvals altogether, by using permits or whatever.. 
+ if allowance < total_amount { + // Approve the contract to spend all the client's tokens. + approve_to_spend_tokens( + wallet.clone(), + network, + *network.data_payments_address(), + U256::MAX, + ) + .await + .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + } + + let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); + let data_payments = DataPaymentsHandler::new(*network.data_payments_address(), provider); + + // Divide transfers over multiple transactions if they exceed the max per transaction. + let chunks = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION); + + for batch in chunks { + let batch: Vec = batch.to_vec(); + debug!( + "Paying for batch of quotes of len: {}, {batch:?}", + batch.len() + ); + + let tx_hash = data_payments + .pay_for_quotes(batch.clone()) + .await + .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + info!("Paid for batch of quotes with final tx hash: {tx_hash}"); + + for (quote_hash, _, _) in batch { + tx_hashes_by_quote.insert(quote_hash, tx_hash); + } + } + + Ok(tx_hashes_by_quote) +} + +#[cfg(test)] +mod tests { + use crate::common::Amount; + use crate::testnet::Testnet; + use crate::wallet::{from_private_key, Wallet}; + use alloy::network::{Ethereum, EthereumWallet, NetworkWallet}; + use alloy::primitives::address; + + #[tokio::test] + async fn test_from_private_key() { + let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d"; // DevSkim: ignore DS117838 + let wallet = from_private_key(private_key).unwrap(); + let account = >::default_signer_address(&wallet); + + // Assert that the addresses are the same, i.e. 
the wallet was successfully created from the private key + assert_eq!( + account, + address!("1975d01f46D70AAc0dd3fCf942d92650eE63C79A") + ); + } + + #[tokio::test] + async fn test_transfer_gas_tokens() { + let testnet = Testnet::new().await; + let network = testnet.to_network(); + let wallet = + Wallet::new_from_private_key(network.clone(), &testnet.default_wallet_private_key()) + .unwrap(); + let receiver_wallet = Wallet::new_with_random_wallet(network); + let transfer_amount = Amount::from(117); + + let initial_balance = receiver_wallet.balance_of_gas_tokens().await.unwrap(); + + assert_eq!(initial_balance, Amount::from(0)); + + let _ = wallet + .transfer_gas_tokens(receiver_wallet.address(), transfer_amount) + .await + .unwrap(); + + let final_balance = receiver_wallet.balance_of_gas_tokens().await.unwrap(); + + assert_eq!(final_balance, transfer_amount); + } +} diff --git a/evmlib/tests/common/mod.rs b/evmlib/tests/common/mod.rs new file mode 100644 index 0000000000..9e1a6d4369 --- /dev/null +++ b/evmlib/tests/common/mod.rs @@ -0,0 +1 @@ +pub mod quote; diff --git a/evmlib/tests/common/quote.rs b/evmlib/tests/common/quote.rs new file mode 100644 index 0000000000..21d05cf189 --- /dev/null +++ b/evmlib/tests/common/quote.rs @@ -0,0 +1,10 @@ +use evmlib::common::{Amount, QuotePayment}; +use evmlib::utils::{dummy_address, dummy_hash}; + +#[allow(dead_code)] +pub fn random_quote_payment() -> QuotePayment { + let quote_hash = dummy_hash(); + let reward_address = dummy_address(); + let amount = Amount::from(200); + (quote_hash, reward_address, amount) +} diff --git a/evmlib/tests/data_payments.rs b/evmlib/tests/data_payments.rs new file mode 100644 index 0000000000..26223cfcc1 --- /dev/null +++ b/evmlib/tests/data_payments.rs @@ -0,0 +1,137 @@ +mod common; + +use crate::common::quote::random_quote_payment; +use alloy::network::{Ethereum, EthereumWallet}; +use alloy::node_bindings::AnvilInstance; +use alloy::primitives::utils::parse_ether; +use 
alloy::providers::ext::AnvilApi; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider, WalletProvider}; +use alloy::signers::local::{LocalSigner, PrivateKeySigner}; +use alloy::transports::http::{Client, Http}; +use evmlib::common::U256; +use evmlib::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; +use evmlib::contract::network_token::NetworkToken; +use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, start_node}; +use evmlib::wallet::wallet_address; + +async fn setup() -> ( + AnvilInstance, + NetworkToken< + Http, + FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, + >, + Ethereum, + >, + DataPaymentsHandler< + Http, + FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, + >, + Ethereum, + >, +) { + let (anvil, rpc_url) = start_node(); + + let network_token = deploy_network_token_contract(&rpc_url, &anvil).await; + + let data_payments = + deploy_data_payments_contract(&rpc_url, &anvil, *network_token.contract.address()).await; + + (anvil, network_token, data_payments) +} + +#[allow(clippy::unwrap_used)] +#[allow(clippy::type_complexity)] +#[allow(dead_code)] +async fn provider_with_gas_funded_wallet( + anvil: &AnvilInstance, +) -> FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, + ReqwestProvider, + Http, + Ethereum, +> { + let signer: PrivateKeySigner = LocalSigner::random(); + let wallet = EthereumWallet::from(signer); + + let rpc_url = anvil.endpoint().parse().unwrap(); + + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(wallet) + .on_http(rpc_url); + + let account = 
wallet_address(provider.wallet()); + + // Fund the wallet with plenty of gas tokens + provider + .anvil_set_balance(account, parse_ether("1000").expect("")) + .await + .unwrap(); + + provider +} + +#[tokio::test] +async fn test_deploy() { + setup().await; +} + +#[tokio::test] +async fn test_pay_for_quotes() { + let (_anvil, network_token, mut data_payments) = setup().await; + + let mut quote_payments = vec![]; + + for _ in 0..MAX_TRANSFERS_PER_TRANSACTION { + let quote_payment = random_quote_payment(); + quote_payments.push(quote_payment); + } + + let _ = network_token + .approve(*data_payments.contract.address(), U256::MAX) + .await + .unwrap(); + + // Contract provider has a different account coupled to it, + // so we set it to the same as the network token contract + data_payments.set_provider(network_token.contract.provider().clone()); + + let result = data_payments.pay_for_quotes(quote_payments).await; + + assert!(result.is_ok(), "Failed with error: {:?}", result.err()); +} diff --git a/evmlib/tests/network_token.rs b/evmlib/tests/network_token.rs new file mode 100644 index 0000000000..0cc2b1c1eb --- /dev/null +++ b/evmlib/tests/network_token.rs @@ -0,0 +1,94 @@ +mod common; + +use alloy::network::{Ethereum, EthereumWallet, NetworkWallet}; +use alloy::node_bindings::AnvilInstance; +use alloy::primitives::U256; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ReqwestProvider, WalletProvider}; +use alloy::signers::local::PrivateKeySigner; +use alloy::transports::http::{Client, Http}; +use evmlib::contract::network_token::NetworkToken; +use evmlib::testnet::{deploy_network_token_contract, start_node}; +use evmlib::wallet::wallet_address; +use std::str::FromStr; + +async fn setup() -> ( + AnvilInstance, + NetworkToken< + Http, + FillProvider< + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, + 
ReqwestProvider, + Http, + Ethereum, + >, + Ethereum, + >, +) { + let (anvil, rpc_url) = start_node(); + + let network_token = deploy_network_token_contract(&rpc_url, &anvil).await; + + (anvil, network_token) +} + +#[tokio::test] +async fn test_deploy() { + setup().await; +} + +#[tokio::test] +async fn test_balance_of() { + let (_anvil, contract) = setup().await; + + let account = >::default_signer_address( + contract.contract.provider().wallet(), + ); + + let balance = contract.balance_of(account).await.unwrap(); + + assert_eq!( + balance, + U256::from_str("20000000000000000000000000").unwrap() + ); +} + +#[tokio::test] +async fn test_approve() { + let (_anvil, network_token) = setup().await; + + let account = wallet_address(network_token.contract.provider().wallet()); + + let spend_value = U256::from(1); + let spender = PrivateKeySigner::random(); + + // Approve for the spender to spend a value from the funds of the owner (our default account). + let approval_result = network_token.approve(spender.address(), spend_value).await; + + assert!( + approval_result.is_ok(), + "Approval failed with error: {:?}", + approval_result.err() + ); + + let allowance = network_token + .contract + .allowance(account, spender.address()) + .call() + .await + .unwrap() + ._0; + + assert_eq!(allowance, spend_value); +} diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs new file mode 100644 index 0000000000..905f719fc3 --- /dev/null +++ b/evmlib/tests/wallet.rs @@ -0,0 +1,110 @@ +mod common; + +use crate::common::quote::random_quote_payment; +use alloy::network::EthereumWallet; +use alloy::node_bindings::AnvilInstance; +use alloy::primitives::utils::parse_ether; +use alloy::providers::ext::AnvilApi; +use alloy::providers::{ProviderBuilder, WalletProvider}; +use alloy::signers::local::{LocalSigner, PrivateKeySigner}; +use evmlib::common::{Amount, TxHash}; +use evmlib::contract::data_payments::MAX_TRANSFERS_PER_TRANSACTION; +use evmlib::testnet::{deploy_data_payments_contract, 
deploy_network_token_contract, start_node}; +use evmlib::transaction::verify_data_payment; +use evmlib::wallet::{transfer_tokens, wallet_address, Wallet}; +use evmlib::{CustomNetwork, Network}; +use std::collections::HashSet; + +#[allow(clippy::unwrap_used)] +async fn local_testnet() -> (AnvilInstance, Network, EthereumWallet) { + let (anvil, rpc_url) = start_node(); + let network_token = deploy_network_token_contract(&rpc_url, &anvil).await; + let payment_token_address = *network_token.contract.address(); + let data_payments = + deploy_data_payments_contract(&rpc_url, &anvil, payment_token_address).await; + + ( + anvil, + Network::Custom(CustomNetwork { + rpc_url_http: rpc_url, + payment_token_address, + data_payments_address: *data_payments.contract.address(), + }), + network_token.contract.provider().wallet().clone(), + ) +} + +#[allow(clippy::unwrap_used)] +async fn funded_wallet(network: &Network, genesis_wallet: EthereumWallet) -> Wallet { + let signer: PrivateKeySigner = LocalSigner::random(); + let wallet = EthereumWallet::from(signer); + let account = wallet_address(&wallet); + + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(genesis_wallet.clone()) + .on_http(network.rpc_url().clone()); + + // Fund the wallet with plenty of gas tokens + provider + .anvil_set_balance(account, parse_ether("1000").expect("")) + .await + .unwrap(); + + // Fund the wallet with plenty of ERC20 tokens + transfer_tokens( + genesis_wallet, + network, + account, + Amount::from(9999999999_u64), + ) + .await + .unwrap(); + + Wallet::new(network.clone(), wallet) +} + +#[tokio::test] +async fn test_pay_for_quotes_and_data_payment_verification() { + const TRANSFERS: usize = 600; + const EXPIRATION_TIMESTAMP_IN_SECS: u64 = 4102441200; // The year 2100 + + let (_anvil, network, genesis_wallet) = local_testnet().await; + let wallet = funded_wallet(&network, genesis_wallet).await; + + let mut quote_payments = vec![]; + + for _ in 0..TRANSFERS { + let quote = 
random_quote_payment(); + quote_payments.push(quote); + } + + let tx_hashes = wallet.pay_for_quotes(quote_payments.clone()).await.unwrap(); + + let unique_tx_hashes: HashSet<TxHash> = tx_hashes.values().cloned().collect(); + + assert_eq!( + unique_tx_hashes.len(), + TRANSFERS.div_ceil(MAX_TRANSFERS_PER_TRANSACTION) + ); + + for quote_payment in quote_payments.iter() { + let tx_hash = *tx_hashes.get(&quote_payment.0).unwrap(); + + let result = verify_data_payment( + &network, + tx_hash, + quote_payment.0, + quote_payment.1, + quote_payment.2, + EXPIRATION_TIMESTAMP_IN_SECS, + ) + .await; + + assert!( + result.is_ok(), + "Verification failed for: {quote_payment:?}. Error: {:?}", + result.err() + ); + } +} diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index bb3b5f5afe..bbab570e94 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,18 +7,21 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.7" +version = "0.2.8" [[bin]] name = "nat-detection" path = "src/main.rs" +[features] +nightly = [] + [dependencies] clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } futures = "~0.3.13" -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "tcp", "noise", @@ -28,7 +31,9 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.18.4" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/nat-detection/src/behaviour/identify.rs b/nat-detection/src/behaviour/identify.rs index 8489034039..738a01363c --- 
a/nat-detection/src/behaviour/identify.rs +++ b/nat-detection/src/behaviour/identify.rs @@ -7,7 +7,11 @@ use crate::{behaviour::PROTOCOL_VERSION, App}; impl App { pub(crate) fn on_event_identify(&mut self, event: identify::Event) { match event { - identify::Event::Received { peer_id, info } => { + identify::Event::Received { + peer_id, + info, + connection_id, + } => { debug!( %peer_id, protocols=?info.protocols, @@ -18,7 +22,7 @@ impl App { // Disconnect if peer has incompatible protocol version. if info.protocol_version != PROTOCOL_VERSION { - warn!(%peer_id, "Incompatible protocol version. Disconnecting from peer."); + warn!(conn_id=%connection_id, %peer_id, "Incompatible protocol version. Disconnecting from peer."); let _ = self.swarm.disconnect_peer_id(peer_id); return; } @@ -29,12 +33,12 @@ impl App { .iter() .any(|p| *p == autonat::DEFAULT_PROTOCOL_NAME) { - warn!(%peer_id, "Peer does not support AutoNAT. Disconnecting from peer."); + warn!(conn_id=%connection_id, %peer_id, "Peer does not support AutoNAT. Disconnecting from peer."); let _ = self.swarm.disconnect_peer_id(peer_id); return; } - info!(%peer_id, "Received peer info: confirmed it supports AutoNAT"); + info!(conn_id=%connection_id, %peer_id, "Received peer info: confirmed it supports AutoNAT"); // If we're a client and the peer has (a) global listen address(es), // add it as an AutoNAT server. diff --git a/nat-detection/src/main.rs b/nat-detection/src/main.rs index 645b181266..fccbe3ea4c 100644 --- a/nat-detection/src/main.rs +++ b/nat-detection/src/main.rs @@ -35,7 +35,7 @@ const RETRY_INTERVAL: Duration = Duration::from_secs(10); /// - 11: Public under UPnP /// - 12: Private or Unknown NAT #[derive(Debug, Parser)] -#[clap(version, author, verbatim_doc_comment)] +#[clap(disable_version_flag = true)] struct Opt { /// Port to listen on. 
/// @@ -60,15 +60,50 @@ struct Opt { #[command(flatten)] verbose: clap_verbosity_flag::Verbosity, + + /// Print the crate version + #[clap(long)] + crate_version: bool, + + /// Print the package version + #[clap(long)] + #[cfg(not(feature = "nightly"))] + package_version: bool, + + /// Print version information. + #[clap(long)] + version: bool, } #[tokio::main] async fn main() -> Result<()> { color_eyre::install()?; - // Process command line arguments. let opt = Opt::parse(); + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi NAT Detection", + env!("CARGO_PKG_VERSION"), + None + ) + ); + return Ok(()); + } + + if opt.crate_version { + println!("Crate version: {}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("Package version: {}", sn_build_info::package_version()); + return Ok(()); + } + let registry = tracing_subscriber::registry().with(tracing_subscriber::fmt::layer()); // Use `RUST_LOG` if set, else use the verbosity flag (where `-vvvv` is trace level). 
let _ = if std::env::var_os("RUST_LOG").is_some() { diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 3178f653e4..583edb4e60 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.19" +version = "0.4.0" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -14,6 +14,9 @@ build = "build.rs" name = "node-launchpad" path = "src/bin/tui/main.rs" +[features] +nightly = [] + [dependencies] atty = "0.2.14" better-panic = "0.3.0" @@ -48,10 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.6", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.3", path = "../sn_peers_acquisition" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn-node-manager = { version = "0.11.0", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.4", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.14", path = "../sn_service_management" } +sn_service_management = { version = "0.4.0", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" @@ -64,6 +70,7 @@ tui-input = "0.8.0" which = "6.0.1" faccess = "0.2.4" throbber-widgets-tui = "0.7.0" +regex = "1.11.0" [build-dependencies] vergen = { version = "8.2.6", features = ["build", "git", "gitoxide", "cargo"] } diff --git a/node-launchpad/src/bin/tui/main.rs b/node-launchpad/src/bin/tui/main.rs index 2ceb235900..d3074018af 100644 --- a/node-launchpad/src/bin/tui/main.rs +++ 
b/node-launchpad/src/bin/tui/main.rs @@ -16,7 +16,7 @@ use color_eyre::eyre::Result; use node_launchpad::{ app::App, config::configure_winsw, - utils::{initialize_logging, initialize_panic_handler, version}, + utils::{initialize_logging, initialize_panic_handler}, }; #[cfg(target_os = "windows")] use sn_node_manager::config::is_running_as_root; @@ -25,7 +25,7 @@ use std::{env, path::PathBuf}; use tokio::task::LocalSet; #[derive(Parser, Debug)] -#[command(author, version = version(), about)] +#[command(disable_version_flag = true)] pub struct Cli { #[arg( short, @@ -53,12 +53,48 @@ pub struct Cli { #[command(flatten)] pub(crate) peers: PeersArgs, + + /// Print the crate version. + #[clap(long)] + crate_version: bool, + + /// Print the package version. + #[clap(long)] + #[cfg(not(feature = "nightly"))] + package_version: bool, + + /// Print the version. + #[clap(long)] + version: bool, } async fn tokio_main() -> Result<()> { initialize_panic_handler()?; let args = Cli::parse(); + if args.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Node Launchpad", + env!("CARGO_PKG_VERSION"), + None + ) + ); + return Ok(()); + } + + if args.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if args.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + info!("Starting app with args: {args:?}"); let mut app = App::new( args.tick_rate, diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 90a626ec33..934578f93e 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -192,10 +192,14 @@ impl Component for Options { .style(Style::default().fg(GHOST_WHITE)); // Beta Rewards Program - let beta_legend = " Edit Discord Username "; + let beta_legend = if self.discord_username.is_empty() { + " Add Wallet " + } else { + " Change Wallet " + }; let beta_key = " 
[Ctrl+B] "; let block2 = Block::default() - .title(" Beta Rewards Program ") + .title(" Wallet ") .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) @@ -204,7 +208,7 @@ vec![Row::new(vec![ Cell::from( Line::from(vec![Span::styled( - " Discord Username: ", + " Wallet Address: ", Style::default().fg(LIGHT_PERIWINKLE), )]) .alignment(Alignment::Left), diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 615c20bcf4..f512f9d0a4 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -11,27 +11,30 @@ use super::super::Component; use crate::{ action::{Action, OptionsActions}, mode::{InputMode, Scene}, - style::{clear_area, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, + style::{clear_area, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, RED, VIVID_SKY_BLUE}, widgets::hyperlink::Hyperlink, }; use color_eyre::Result; use crossterm::event::{Event, KeyCode, KeyEvent}; use ratatui::{prelude::*, widgets::*}; +use regex::Regex; use tui_input::{backend::crossterm::EventHandler, Input}; -const INPUT_SIZE_USERNAME: u16 = 32; // as per discord docs +const INPUT_SIZE_USERNAME: u16 = 42; // Ethereum address plus 0x const INPUT_AREA_USERNAME: u16 = INPUT_SIZE_USERNAME + 2; // +2 for the padding pub struct BetaProgramme { /// Whether the component is active right now, capturing keystrokes + draw things. active: bool, state: BetaProgrammeState, - discord_input_filed: Input, + discord_input_field: Input, // cache the old value incase user presses Esc. 
old_value: String, back_to: Scene, + can_save: bool, } +#[allow(dead_code)] enum BetaProgrammeState { DiscordIdAlreadySet, ShowTCs, @@ -49,27 +52,43 @@ impl BetaProgramme { Self { active: false, state, - discord_input_filed: Input::default().with_value(username), + discord_input_field: Input::default().with_value(username), old_value: Default::default(), back_to: Scene::Status, + can_save: false, + } + } + + pub fn validate(&mut self) { + if self.discord_input_field.value().is_empty() { + self.can_save = false; + } else { + let re = Regex::new(r"^0x[a-fA-F0-9]{40}$").expect("Failed to compile regex"); + self.can_save = re.is_match(self.discord_input_field.value()); } } fn capture_inputs(&mut self, key: KeyEvent) -> Vec { let send_back = match key.code { KeyCode::Enter => { - let username = self.discord_input_filed.value().to_string().to_lowercase(); - self.discord_input_filed = username.clone().into(); - - debug!( - "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", - ); - self.state = BetaProgrammeState::DiscordIdAlreadySet; - vec![ - Action::StoreDiscordUserName(username.clone()), - Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), - Action::SwitchScene(Scene::Status), - ] + self.validate(); + if self.can_save { + let username = self.discord_input_field.value().to_string().to_lowercase(); + self.discord_input_field = username.clone().into(); + + debug!( + "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", + ); + self.state = BetaProgrammeState::DiscordIdAlreadySet; + return vec![ + Action::StoreDiscordUserName(username.clone()), + Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername( + username, + )), // FIXME: Change OptionsActions::UpdateBetaProgrammeUsername name + Action::SwitchScene(Scene::Status), + ]; + } + vec![] } KeyCode::Esc => { debug!( @@ -77,8 +96,8 @@ impl BetaProgramme { self.old_value ); // 
reset to old value - self.discord_input_filed = self - .discord_input_filed + self.discord_input_field = self + .discord_input_field .clone() .with_value(self.old_value.clone()); vec![Action::SwitchScene(self.back_to)] @@ -86,13 +105,14 @@ impl BetaProgramme { KeyCode::Char(' ') => vec![], KeyCode::Backspace => { // if max limit reached, we should allow Backspace to work. - self.discord_input_filed.handle_event(&Event::Key(key)); + self.discord_input_field.handle_event(&Event::Key(key)); + self.validate(); vec![] } _ => { - // max 32 limit as per discord docs - if self.discord_input_filed.value().chars().count() < 32 { - self.discord_input_filed.handle_event(&Event::Key(key)); + if self.discord_input_field.value().chars().count() < INPUT_SIZE_USERNAME as usize { + self.discord_input_field.handle_event(&Event::Key(key)); + self.validate(); } vec![] } @@ -109,26 +129,27 @@ impl Component for BetaProgramme { // while in entry mode, keybinds are not captured, so gotta exit entry mode from here let send_back = match &self.state { BetaProgrammeState::DiscordIdAlreadySet => self.capture_inputs(key), - BetaProgrammeState::ShowTCs => { - match key.code { - KeyCode::Char('y') | KeyCode::Char('Y') => { - let is_discord_id_set = !self.discord_input_filed.value().is_empty(); - if is_discord_id_set { - debug!("User accepted the TCs, but discord id already set, moving to DiscordIdAlreadySet"); - self.state = BetaProgrammeState::DiscordIdAlreadySet; - } else { - debug!("User accepted the TCs, but no discord id set, moving to AcceptTCsAndEnterDiscordId"); - self.state = BetaProgrammeState::AcceptTCsAndEnterDiscordId; - } + BetaProgrammeState::ShowTCs => match key.code { + KeyCode::Char('y') | KeyCode::Char('Y') => { + let is_discord_id_set = !self.discord_input_field.value().is_empty(); + if is_discord_id_set { + debug!("User accepted the TCs, but discord id already set, moving to DiscordIdAlreadySet"); + self.state = BetaProgrammeState::DiscordIdAlreadySet; + } else { + 
debug!("User accepted the TCs, but no discord id set, moving to AcceptTCsAndEnterDiscordId"); + self.state = BetaProgrammeState::AcceptTCsAndEnterDiscordId; } - KeyCode::Esc => { - debug!("User rejected the TCs, moving to RejectTCs"); - self.state = BetaProgrammeState::RejectTCs; - } - _ => {} + vec![] } - vec![] - } + KeyCode::Esc => { + debug!("User rejected the TCs, moving to original screen"); + self.state = BetaProgrammeState::ShowTCs; + vec![Action::SwitchScene(self.back_to)] + } + _ => { + vec![] + } + }, BetaProgrammeState::RejectTCs => { if let KeyCode::Esc = key.code { debug!("RejectTCs msg closed. Switching to Status scene."); @@ -146,7 +167,7 @@ impl Component for BetaProgramme { Action::SwitchScene(scene) => match scene { Scene::StatusBetaProgrammePopUp | Scene::OptionsBetaProgrammePopUp => { self.active = true; - self.old_value = self.discord_input_filed.value().to_string(); + self.old_value = self.discord_input_field.value().to_string(); if scene == Scene::StatusBetaProgrammePopUp { self.back_to = Scene::Status; } else if scene == Scene::OptionsBetaProgrammePopUp { @@ -190,7 +211,7 @@ impl Component for BetaProgramme { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title(" Beta Rewards Program ") + .title(" Add Your Wallet ") .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) @@ -200,7 +221,8 @@ impl Component for BetaProgramme { match self.state { BetaProgrammeState::DiscordIdAlreadySet => { - // split into 4 parts, for the prompt, input, text, dash , and buttons + self.validate(); // FIXME: maybe this should be somewhere else + // split into 4 parts, for the prompt, input, text, dash , and buttons let layer_two = Layout::new( Direction::Vertical, [ @@ -218,27 +240,40 @@ impl Component for BetaProgramme { ) .split(layer_one[1]); - let prompt_text = Paragraph::new("Discord Username associated with this device:") - .block(Block::default()) - .alignment(Alignment::Center) - 
.fg(GHOST_WHITE); + let prompt_text = Paragraph::new(Line::from(vec![ + Span::styled("Enter new ".to_string(), Style::default()), + Span::styled("Wallet Address".to_string(), Style::default().bold()), + ])) + .block(Block::default()) + .alignment(Alignment::Center) + .fg(GHOST_WHITE); f.render_widget(prompt_text, layer_two[0]); let spaces = " ".repeat( - (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_filed.value().len(), + (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_field.value().len(), ); let input = Paragraph::new(Span::styled( - format!("{}{} ", spaces, self.discord_input_filed.value()), - Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), + format!("{}{} ", spaces, self.discord_input_field.value()), + Style::default() + .fg(if self.can_save { VIVID_SKY_BLUE } else { RED }) + .bg(INDIGO) + .underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(Text::from(vec![ - Line::raw("Changing your Username will reset all nodes,"), - Line::raw("and any Nanos left on this device will be lost."), - ])) + let text = Paragraph::new(Text::from(if self.can_save { + vec![ + Line::raw("Changing your Wallet will reset and restart"), + Line::raw("all your nodes."), + ] + } else { + vec![Line::from(Span::styled( + "Invalid wallet address".to_string(), + Style::default().fg(RED), + ))] + })) .alignment(Alignment::Center) .block( Block::default() @@ -260,15 +295,19 @@ impl Component for BetaProgramme { .split(layer_two[4]); let button_no = Line::from(vec![Span::styled( - " No, Cancel [Esc]", + " Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); let button_yes = Line::from(vec![Span::styled( - "Save Username [Enter]", - Style::default().fg(EUCALYPTUS), + "Change Wallet [Enter]", + if self.can_save { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }, )]); f.render_widget(button_yes, buttons_layer[1]); } @@ 
-290,9 +329,9 @@ impl Component for BetaProgramme { .split(layer_one[1]); let text = Paragraph::new(vec![ - Line::from(Span::styled("Earn a slice of millions of tokens created at the genesis of the Autonomi Network by running nodes to build and test the Beta.",Style::default())), + Line::from(Span::styled("Add your wallet and you can earn a slice of millions of tokens created at the genesis of the Autonomi Network when through running nodes.",Style::default())), Line::from(Span::styled("\n\n",Style::default())), - Line::from(Span::styled("To continue in the beta Rewards Program you agree to the Terms and Conditions found here:",Style::default())), + Line::from(Span::styled("By continuing you agree to the Terms and Conditions found here:",Style::default())), Line::from(Span::styled("\n\n",Style::default())), ] ) @@ -327,10 +366,12 @@ impl Component for BetaProgramme { Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); - let button_yes = Line::from(vec![Span::styled( - "Yes, I agree! Continue [Y]", + + let button_yes = Paragraph::new(Line::from(vec![Span::styled( + "Yes, I agree! 
Continue [Y] ", Style::default().fg(EUCALYPTUS), - )]); + )])) + .alignment(Alignment::Right); f.render_widget(button_yes, buttons_layer[1]); } BetaProgrammeState::RejectTCs => { @@ -381,7 +422,9 @@ impl Component for BetaProgramme { // for the input Constraint::Length(2), // for the text - Constraint::Length(5), + Constraint::Length(3), + // for the hyperlink + Constraint::Length(2), // gap Constraint::Length(1), // for the buttons @@ -390,57 +433,68 @@ impl Component for BetaProgramme { ) .split(layer_one[1]); - let prompt = - Paragraph::new("Enter your Discord Username").alignment(Alignment::Center); + let prompt = Paragraph::new(Line::from(vec![ + Span::styled("Enter your ", Style::default()), + Span::styled("Wallet Address", Style::default().fg(GHOST_WHITE)), + ])) + .alignment(Alignment::Center); f.render_widget(prompt.fg(GHOST_WHITE), layer_two[0]); let spaces = " ".repeat( - (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_filed.value().len(), + (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_field.value().len(), ); let input = Paragraph::new(Span::styled( - format!("{}{} ", spaces, self.discord_input_filed.value()), + format!("{}{} ", spaces, self.discord_input_field.value()), Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(vec![ - Line::from(Span::styled( - "Submit your username and track your progress on our Discord server.", - Style::default(), - )), - Line::from(Span::styled("\n\n", Style::default())), - Line::from(Span::styled( - "Note: your username may be different from your display name.", - Style::default(), - )), - ]) + let text = Paragraph::new(vec![Line::from(Span::styled( + "Find out more about compatible wallets, and how to track your earnings:", + Style::default(), + ))]) .block(Block::default().padding(Padding::horizontal(2))) .wrap(Wrap { trim: false }); f.render_widget(text.fg(GHOST_WHITE), layer_two[2]); + let 
link = Hyperlink::new( + Span::styled( + " https://autonomi.com/wallet", + Style::default().fg(VIVID_SKY_BLUE), + ), + "https://autonomi.com/wallet", + ); + + f.render_widget_ref(link, layer_two[3]); + let dash = Block::new() .borders(Borders::BOTTOM) .border_style(Style::new().fg(GHOST_WHITE)); - f.render_widget(dash, layer_two[3]); + f.render_widget(dash, layer_two[4]); let buttons_layer = Layout::horizontal(vec![ Constraint::Percentage(50), Constraint::Percentage(50), ]) - .split(layer_two[4]); + .split(layer_two[5]); let button_no = Line::from(vec![Span::styled( - " No, Cancel [Esc]", + " Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); - let button_yes = Line::from(vec![Span::styled( - "Submit Username [Enter]", - Style::default().fg(EUCALYPTUS), - )]); + let button_yes = Paragraph::new(Line::from(vec![Span::styled( + "Save Wallet [Enter] ", + if self.can_save { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }, + )])) + .alignment(Alignment::Right); f.render_widget(button_yes, buttons_layer[1]); } } diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 0f6dea98b5..90706c488d 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -30,7 +30,7 @@ use crate::{ clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE, }, }; -use color_eyre::eyre::{OptionExt, Result}; +use color_eyre::eyre::{Ok, OptionExt, Result}; use crossterm::event::KeyEvent; use ratatui::text::Span; use ratatui::{prelude::*, widgets::*}; @@ -40,7 +40,7 @@ use sn_peers_acquisition::PeersArgs; use sn_service_management::{ control::ServiceController, NodeRegistry, NodeServiceData, ServiceStatus, }; -use std::collections::HashMap; +use std::fmt; use std::{ path::PathBuf, time::{Duration, Instant}, @@ -50,27 +50,42 @@ use tokio::sync::mpsc::UnboundedSender; use 
super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes}; -use throbber_widgets_tui::{self, ThrobberState}; +use throbber_widgets_tui::{self, Throbber, ThrobberState}; pub const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; +// Table Widths +const NODE_WIDTH: usize = 10; +const VERSION_WIDTH: usize = 7; +const ATTOS_WIDTH: usize = 5; +const MEMORY_WIDTH: usize = 7; +const MBPS_WIDTH: usize = 15; +const RECORDS_WIDTH: usize = 4; +const PEERS_WIDTH: usize = 5; +const CONNS_WIDTH: usize = 5; +const STATUS_WIDTH: usize = 8; +const SPINNER_WIDTH: usize = 1; + #[derive(Clone)] -pub struct Status { +pub struct Status<'a> { /// Whether the component is active right now, capturing keystrokes + drawing things. active: bool, action_sender: Option>, config: Config, - // state - node_services: Vec, - node_services_throttle_state: HashMap, + // NAT is_nat_status_determined: bool, error_while_running_nat_detection: usize, + // Device Stats Section node_stats: NodeStats, node_stats_last_update: Instant, - node_table_state: TableState, + // Nodes + node_services: Vec, + items: Option>>, + // Amount of nodes nodes_to_start: usize, + // Discord username discord_username: String, // Currently the node registry file does not support concurrent actions and thus can lead to // inconsistent state. Another solution would be to have a file lock/db. 
@@ -108,21 +123,20 @@ pub struct StatusConfig { pub port_to: Option, } -impl Status { +impl Status<'_> { pub async fn new(config: StatusConfig) -> Result { let mut status = Self { peers_args: config.peers_args, action_sender: Default::default(), config: Default::default(), active: true, - node_services: Default::default(), - node_services_throttle_state: HashMap::new(), is_nat_status_determined: false, error_while_running_nat_detection: 0, node_stats: NodeStats::default(), node_stats_last_update: Instant::now(), + node_services: Default::default(), + items: None, nodes_to_start: config.allocated_disk_space, - node_table_state: Default::default(), lock_registry: None, discord_username: config.discord_username, safenode_path: config.safenode_path, @@ -133,6 +147,7 @@ impl Status { error_popup: None, }; + // Nodes registry let now = Instant::now(); debug!("Refreshing node registry states on startup"); let mut node_registry = NodeRegistry::load(&get_node_registry_path()?)?; @@ -151,6 +166,113 @@ impl Status { Ok(status) } + fn update_node_items(&mut self) -> Result<()> { + // Iterate over existing node services and update their corresponding NodeItem + if let Some(ref mut items) = self.items { + for node_item in self.node_services.iter() { + // Find the corresponding item by service name + if let Some(item) = items + .items + .iter_mut() + .find(|i| i.name == node_item.service_name) + { + // Update status based on current node status + item.status = match node_item.status { + ServiceStatus::Running => { + // Call calc_next on the spinner state + item.spinner_state.calc_next(); + NodeStatus::Running + } + ServiceStatus::Stopped => NodeStatus::Stopped, + ServiceStatus::Added => NodeStatus::Added, + ServiceStatus::Removed => NodeStatus::Removed, + }; + + // Starting is not part of ServiceStatus so we do it manually + if let Some(LockRegistryState::StartingNodes) = self.lock_registry { + item.spinner_state.calc_next(); + item.status = NodeStatus::Starting; + } + + // 
Update peers count + item.peers = match node_item.connected_peers { + Some(ref peers) => peers.len(), + None => 0, + }; + + // Update individual stats if available + if let Some(stats) = self + .node_stats + .individual_stats + .iter() + .find(|s| s.service_name == node_item.service_name) + { + item.attos = stats.forwarded_rewards; + item.memory = stats.memory_usage_mb; + item.mbps = format!( + "↓{:06.2} ↑{:06.2}", + stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), + stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) + ); + item.records = stats.max_records; + item.connections = stats.connections; + } + } else { + // If not found, create a new NodeItem and add it to items + let new_item = NodeItem { + name: node_item.service_name.clone(), + version: node_item.version.to_string(), + attos: 0, + memory: 0, + mbps: "-".to_string(), + records: 0, + peers: 0, + connections: 0, + status: NodeStatus::Added, // Set initial status as Added + spinner: Throbber::default(), + spinner_state: ThrobberState::default(), + }; + items.items.push(new_item); + } + } + } else { + // If items is None, create a new list (fallback) + let node_items: Vec = self + .node_services + .iter() + .filter_map(|node_item| { + if node_item.status == ServiceStatus::Removed { + return None; + } + // Update status based on current node status + let status = match node_item.status { + ServiceStatus::Running => NodeStatus::Running, + ServiceStatus::Stopped => NodeStatus::Stopped, + ServiceStatus::Added => NodeStatus::Added, + ServiceStatus::Removed => NodeStatus::Removed, + }; + + // Create a new NodeItem for the first time + Some(NodeItem { + name: node_item.service_name.clone().to_string(), + version: node_item.version.to_string(), + attos: 0, + memory: 0, + mbps: "-".to_string(), + records: 0, + peers: 0, + connections: 0, + status, + spinner: Throbber::default(), + spinner_state: ThrobberState::default(), + }) + }) + .collect(); + self.items = Some(StatefulTable::with_items(node_items)); + 
} + Ok(()) + } + /// Tries to trigger the update of node stats if the last update was more than `NODE_STAT_UPDATE_INTERVAL` ago. /// The result is sent via the StatusActions::NodesStatsObtained action. fn try_update_node_stats(&mut self, force_update: bool) -> Result<()> { @@ -180,11 +302,6 @@ impl Status { self.node_services.len() ); - if !self.node_services.is_empty() && self.node_table_state.selected().is_none() { - // self.node_table_state.select(Some(0)); - self.node_table_state.select(None); - } - Ok(()) } @@ -207,53 +324,9 @@ impl Status { }) .collect() } - - fn _select_next_table_item(&mut self) { - let i = match self.node_table_state.selected() { - Some(i) => { - if i >= self.node_services.len() - 1 { - 0 - } else { - i + 1 - } - } - None => 0, - }; - self.node_table_state.select(Some(i)); - } - - fn _select_previous_table_item(&mut self) { - let i = match self.node_table_state.selected() { - Some(i) => { - if i == 0 { - self.node_services.len() - 1 - } else { - i - 1 - } - } - None => 0, - }; - self.node_table_state.select(Some(i)); - } - - #[expect(dead_code)] - fn unselect_table_item(&mut self) { - self.node_table_state.select(None); - } - - #[expect(dead_code)] - fn get_service_name_of_selected_table_item(&self) -> Option { - let Some(service_idx) = self.node_table_state.selected() else { - warn!("No item selected from table, not removing anything"); - return None; - }; - self.node_services - .get(service_idx) - .map(|data| data.service_name.clone()) - } } -impl Component for Status { +impl Component for Status<'_> { fn register_action_handler(&mut self, tx: UnboundedSender) -> Result<()> { self.action_sender = Some(tx); @@ -280,9 +353,7 @@ impl Component for Status { match action { Action::Tick => { self.try_update_node_stats(false)?; - for (_spinner_key, spinner_state) in self.node_services_throttle_state.iter_mut() { - spinner_state.calc_next(); // Assuming calc_next() is a method of ThrobberState - } + let _ = self.update_node_items(); } 
Action::SwitchScene(scene) => match scene { Scene::Status | Scene::StatusBetaProgrammePopUp => { @@ -468,6 +539,7 @@ impl Component for Status { action_sender: action_sender.clone(), connection_mode: self.connection_mode, port_range: Some(port_range), + rewards_address: self.discord_username.clone(), }; debug!("Calling maintain_n_running_nodes"); @@ -490,7 +562,11 @@ impl Component for Status { stop_nodes(running_nodes, action_sender); } StatusActions::TriggerBetaProgramme => { - return Ok(Some(Action::SwitchScene(Scene::StatusBetaProgrammePopUp))); + if self.discord_username.is_empty() { + return Ok(Some(Action::SwitchScene(Scene::StatusBetaProgrammePopUp))); + } else { + return Ok(None); + } } }, Action::OptionsActions(OptionsActions::ResetNodes) => { @@ -590,54 +666,44 @@ impl Component for Status { let column_constraints = [Constraint::Length(23), Constraint::Fill(1)]; let stats_table = Table::new(stats_rows, stats_width).widths(column_constraints); - // Combine "Nanos Earned" and "Username" into a single row - let discord_username_placeholder = "Username: "; // Used to calculate the width of the username column - let discord_username_no_username = "[Ctrl+B] to set"; - let discord_username_title = Span::styled( - discord_username_placeholder, - Style::default().fg(VIVID_SKY_BLUE), - ); - - let discord_username = if !self.discord_username.is_empty() { - Span::styled( - self.discord_username.clone(), - Style::default().fg(VIVID_SKY_BLUE), - ) - .bold() + let wallet_not_set = if self.discord_username.is_empty() { + vec![ + Span::styled("Press ".to_string(), Style::default().fg(VIVID_SKY_BLUE)), + Span::styled("[Ctrl+B] ".to_string(), Style::default().fg(GHOST_WHITE)), + Span::styled( + "to add your ".to_string(), + Style::default().fg(VIVID_SKY_BLUE), + ), + Span::styled( + "Wallet Address".to_string(), + Style::default().fg(VIVID_SKY_BLUE).bold(), + ), + ] } else { - Span::styled( - discord_username_no_username, - Style::default().fg(GHOST_WHITE), - ) + vec![] }; 
- let total_nanos_earned_and_discord_row = Row::new(vec![ - Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), + let total_attos_earned_and_wallet_row = Row::new(vec![ + Cell::new("Attos Earned".to_string()).fg(VIVID_SKY_BLUE), Cell::new(self.node_stats.total_forwarded_rewards.to_string()) .fg(VIVID_SKY_BLUE) .bold(), - Cell::new( - Line::from(vec![discord_username_title, discord_username]) - .alignment(Alignment::Right), - ), + Cell::new(Line::from(wallet_not_set).alignment(Alignment::Right)), ]); - let nanos_discord_rows = vec![total_nanos_earned_and_discord_row]; - let nanos_discord_width = [Constraint::Length(5)]; + let attos_wallet_rows = vec![total_attos_earned_and_wallet_row]; + let attos_wallet_width = [Constraint::Length(5)]; let column_constraints = [ Constraint::Length(23), Constraint::Fill(1), - Constraint::Length( - discord_username_placeholder.len() as u16 - + if !self.discord_username.is_empty() { - self.discord_username.len() as u16 - } else { - discord_username_no_username.len() as u16 - }, - ), + Constraint::Length(if self.discord_username.is_empty() { + 41 //TODO: make it dynamic with wallet_not_set + } else { + 0 + }), ]; - let nanos_discord_table = - Table::new(nanos_discord_rows, nanos_discord_width).widths(column_constraints); + let attos_wallet_table = + Table::new(attos_wallet_rows, attos_wallet_width).widths(column_constraints); let inner_area = combined_block.inner(layout[1]); let device_layout = Layout::new( @@ -648,266 +714,140 @@ impl Component for Status { // Render both tables inside the combined block f.render_widget(stats_table, device_layout[0]); - f.render_widget(nanos_discord_table, device_layout[1]); + f.render_widget(attos_wallet_table, device_layout[1]); // ==== Node Status ===== - // Widths - const NODE_WIDTH: usize = 10; - const VERSION_WIDTH: usize = 7; - const NANOS_WIDTH: usize = 5; - const MEMORY_WIDTH: usize = 7; - const MBPS_WIDTH: usize = 13; - const RECORDS_WIDTH: usize = 4; - const PEERS_WIDTH: usize = 5; - 
const CONNS_WIDTH: usize = 5; - const STATUS_WIDTH: usize = 8; - const SPINNER_WIDTH: usize = 1; - - let node_rows: Vec<_> = self - .node_services - .iter() - .filter_map(|n| { - if n.status == ServiceStatus::Removed { - return None; - } - - let mut status = format!("{:?}", n.status); - if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - status = "Starting".to_string(); - } - let connected_peers = match n.connected_peers { - Some(ref peers) => format!("{:?}", peers.len()), - None => "0".to_string(), - }; - - let mut nanos = "-".to_string(); - let mut memory = "-".to_string(); - let mut mbps = " -".to_string(); - let mut records = "-".to_string(); - let mut connections = "-".to_string(); - - let individual_stats = self - .node_stats - .individual_stats - .iter() - .find(|s| s.service_name == n.service_name); - if let Some(stats) = individual_stats { - nanos = stats.forwarded_rewards.to_string(); - memory = stats.memory_usage_mb.to_string(); - mbps = format!( - "↓{:05.2} ↑{:05.2}", - stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), - stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) - ); - records = stats.max_records.to_string(); - connections = stats.connections.to_string(); - } - - // Create a row vector - let row = vec![ - n.service_name.clone().to_string(), - n.version.to_string(), - format!( - "{}{}", - " ".repeat(NANOS_WIDTH.saturating_sub(nanos.len())), - nanos.to_string() - ), - format!( - "{}{} MB", - " ".repeat(MEMORY_WIDTH.saturating_sub(memory.len() + 4)), - memory.to_string() - ), - mbps.to_string(), - format!( - "{}{}", - " ".repeat(RECORDS_WIDTH.saturating_sub(records.len())), - records.to_string() - ), + // No nodes. Empty Table. 
+ if let Some(ref items) = self.items { + if items.items.is_empty() { + let line1 = Line::from(vec![ + Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), + ]); + + let line2 = Line::from(vec![Span::styled( format!( - "{}{}", - " ".repeat(PEERS_WIDTH.saturating_sub(connected_peers.len())), - connected_peers.to_string() + "Each node will use {}GB of storage and a small amount of memory, \ + CPU, and Network bandwidth. Most computers can run many nodes at once, \ + but we recommend you add them gradually", + GB_PER_NODE ), - format!( - "{}{}", - " ".repeat(CONNS_WIDTH.saturating_sub(connections.len())), - connections.to_string() - ), - status.to_string(), + Style::default().fg(LIGHT_PERIWINKLE), + )]); + + f.render_widget( + Paragraph::new(vec![Line::raw(""), line1, Line::raw(""), line2]) + .wrap(Wrap { trim: false }) + .fg(LIGHT_PERIWINKLE) + .block( + Block::default() + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled(" (0) ", Style::default().fg(LIGHT_PERIWINKLE)), + ])) + .title_style(Style::default().fg(LIGHT_PERIWINKLE)) + .borders(Borders::ALL) + .border_style(style::Style::default().fg(EUCALYPTUS)) + .padding(Padding::horizontal(1)), + ), + layout[2], + ); + } else { + // Node/s block + let block_nodes = Block::default() + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + format!(" ({}) ", self.nodes_to_start), + Style::default().fg(LIGHT_PERIWINKLE), + ), + ])) + .padding(Padding::new(1, 1, 0, 0)) + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(EUCALYPTUS)); + + // Split the inner area of the 
combined block + let inner_area = block_nodes.inner(layout[2]); + + // Column Widths + let node_widths = [ + Constraint::Min(NODE_WIDTH as u16), + Constraint::Min(VERSION_WIDTH as u16), + Constraint::Min(ATTOS_WIDTH as u16), + Constraint::Min(MEMORY_WIDTH as u16), + Constraint::Min(MBPS_WIDTH as u16), + Constraint::Min(RECORDS_WIDTH as u16), + Constraint::Min(PEERS_WIDTH as u16), + Constraint::Min(CONNS_WIDTH as u16), + Constraint::Min(STATUS_WIDTH as u16), + Constraint::Max(SPINNER_WIDTH as u16), ]; - // Create a styled row - let row_style = if n.status == ServiceStatus::Running { - Style::default().fg(EUCALYPTUS) - } else { - Style::default().fg(GHOST_WHITE) - }; - - Some(Row::new(row).style(row_style)) - }) - .collect(); - - if node_rows.is_empty() { - let line1 = Line::from(vec![ - Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), - ]); - - let line2 = Line::from(vec![Span::styled( - format!( - "Each node will use {}GB of storage and a small amount of memory, \ - CPU, and Network bandwidth. 
Most computers can run many nodes at once, \ - but we recommend you add them gradually", - GB_PER_NODE - ), - Style::default().fg(LIGHT_PERIWINKLE), - )]); - - f.render_widget( - Paragraph::new(vec![Line::raw(""), line1, Line::raw(""), line2]) - .wrap(Wrap { trim: false }) - .fg(LIGHT_PERIWINKLE) - .block( - Block::default() - .title(Line::from(vec![ - Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), - Span::styled(" (0) ", Style::default().fg(LIGHT_PERIWINKLE)), - ])) - .title_style(Style::default().fg(LIGHT_PERIWINKLE)) - .borders(Borders::ALL) - .border_style(style::Style::default().fg(EUCALYPTUS)) - .padding(Padding::horizontal(1)), - ), - layout[2], - ); - } else { - // Node/s block - let block_nodes = Block::default() - .title(Line::from(vec![ - Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), - Span::styled( - format!(" ({}) ", self.nodes_to_start), - Style::default().fg(LIGHT_PERIWINKLE), + // Header + let header_row = Row::new(vec![ + Cell::new("Node").fg(COOL_GREY), + Cell::new("Version").fg(COOL_GREY), + Cell::new("Attos").fg(COOL_GREY), + Cell::new("Memory").fg(COOL_GREY), + Cell::new( + format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps") + .fg(COOL_GREY), ), - ])) - .padding(Padding::new(1, 1, 0, 0)) - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .border_style(Style::default().fg(EUCALYPTUS)); - - // Create a layout to arrange the header and table vertically - let inner_layout = Layout::new( - Direction::Vertical, - vec![Constraint::Length(1), Constraint::Min(0)], - ); - - // Split the inner area of the combined block - let inner_area = block_nodes.inner(layout[2]); - let inner_chunks = inner_layout.split(inner_area); - - // Column Widths - let node_widths = [ - Constraint::Min(NODE_WIDTH as u16), - Constraint::Min(VERSION_WIDTH as u16), - Constraint::Min(NANOS_WIDTH as u16), - Constraint::Min(MEMORY_WIDTH as u16), - Constraint::Min(MBPS_WIDTH as u16), - 
Constraint::Min(RECORDS_WIDTH as u16), - Constraint::Min(PEERS_WIDTH as u16), - Constraint::Min(CONNS_WIDTH as u16), - Constraint::Min(STATUS_WIDTH as u16), - Constraint::Max(SPINNER_WIDTH as u16), - ]; - - // Header - let header_row = Row::new(vec![ - Cell::new("Node").fg(COOL_GREY), - Cell::new("Version").fg(COOL_GREY), - Cell::new("Nanos").fg(COOL_GREY), - Cell::new("Memory").fg(COOL_GREY), - Cell::new( - format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps").fg(COOL_GREY), - ), - Cell::new("Recs").fg(COOL_GREY), - Cell::new("Peers").fg(COOL_GREY), - Cell::new("Conns").fg(COOL_GREY), - Cell::new("Status").fg(COOL_GREY), - Cell::new(" ").fg(COOL_GREY), // Spinner - ]); - - let header = Table::new(vec![header_row.clone()], node_widths) + Cell::new("Recs").fg(COOL_GREY), + Cell::new("Peers").fg(COOL_GREY), + Cell::new("Conns").fg(COOL_GREY), + Cell::new("Status").fg(COOL_GREY), + Cell::new(" ").fg(COOL_GREY), // Spinner + ]) .style(Style::default().add_modifier(Modifier::BOLD)); - // Table items - let table = Table::new(node_rows.clone(), node_widths) - .column_spacing(1) - .highlight_style(Style::default().bg(INDIGO)) - .highlight_spacing(HighlightSpacing::Always); - - f.render_stateful_widget(header, inner_chunks[0], &mut self.node_table_state); - f.render_stateful_widget(table, inner_chunks[1], &mut self.node_table_state); - - // Render the throbber in the last column for running nodes - for (i, node) in self.node_services.iter().enumerate() { - let mut throbber = throbber_widgets_tui::Throbber::default() - .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE); - match node.status { - ServiceStatus::Running => { - throbber = throbber - .throbber_style( - Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), - ) - .use_type(throbber_widgets_tui::WhichUse::Spin); - } - ServiceStatus::Stopped => { - throbber = throbber - .throbber_style( - Style::default() - .fg(GHOST_WHITE) - .add_modifier(Modifier::BOLD), - ) - 
.use_type(throbber_widgets_tui::WhichUse::Full); - } - _ => {} - } - if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - throbber = throbber - .throbber_style( - Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), - ) - .throbber_set(throbber_widgets_tui::BOX_DRAWING) - .use_type(throbber_widgets_tui::WhichUse::Spin); - } - let throbber_area = - Rect::new(inner_chunks[1].width, inner_chunks[1].y + i as u16, 1, 1); - let throttle_state = self - .node_services_throttle_state - .entry(node.service_name.clone()) - .or_default(); - f.render_stateful_widget(throbber, throbber_area, throttle_state); + let items: Vec = self + .items + .as_mut() + .unwrap() + .items + .iter_mut() + .enumerate() + .map(|(i, node_item)| node_item.render_as_row(i, layout[2], f)) + .collect(); + + // Table items + let table = Table::new(items, node_widths) + .header(header_row) + .column_spacing(1) + .highlight_style(Style::default().bg(INDIGO)) + .highlight_spacing(HighlightSpacing::Always); + + f.render_widget(table, inner_area); + + f.render_widget(block_nodes, layout[2]); } - f.render_widget(block_nodes, layout[2]); } // ==== Footer ===== let footer = Footer::default(); - let footer_state = if !node_rows.is_empty() { - if !self.get_running_nodes().is_empty() { - &mut NodesToStart::Running + let footer_state = if let Some(ref items) = self.items { + if !items.items.is_empty() { + if !self.get_running_nodes().is_empty() { + &mut NodesToStart::Running + } else { + &mut NodesToStart::Configured + } } else { - &mut NodesToStart::Configured + &mut NodesToStart::NotConfigured } } else { &mut NodesToStart::NotConfigured }; f.render_stateful_widget(footer, layout[3], footer_state); - // ===== Popup ===== + // ===== Popups ===== // Error Popup if let Some(error_popup) = &self.error_popup { @@ -1003,3 +943,167 @@ impl Component for Status { Ok(vec![]) } } + +#[allow(dead_code)] +#[derive(Default, Clone)] +struct StatefulTable { + state: TableState, + items: Vec, + 
last_selected: Option, +} + +#[allow(dead_code)] +impl StatefulTable { + fn with_items(items: Vec) -> Self { + StatefulTable { + state: TableState::default(), + items, + last_selected: None, + } + } + + fn next(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i >= self.items.len() - 1 { + 0 + } else { + i + 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } + + fn previous(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i == 0 { + self.items.len() - 1 + } else { + i - 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } +} + +#[derive(Default, Debug, Copy, Clone, PartialEq)] +enum NodeStatus { + #[default] + Added, + Running, + Starting, + Stopped, + Removed, +} + +impl fmt::Display for NodeStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + NodeStatus::Added => write!(f, "Added"), + NodeStatus::Running => write!(f, "Running"), + NodeStatus::Starting => write!(f, "Starting"), + NodeStatus::Stopped => write!(f, "Stopped"), + NodeStatus::Removed => write!(f, "Removed"), + } + } +} + +#[derive(Default, Debug, Clone)] +pub struct NodeItem<'a> { + name: String, + version: String, + attos: u64, + memory: usize, + mbps: String, + records: usize, + peers: usize, + connections: usize, + status: NodeStatus, + spinner: Throbber<'a>, + spinner_state: ThrobberState, +} + +impl NodeItem<'_> { + fn render_as_row(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row { + let mut row_style = Style::default().fg(GHOST_WHITE); + let mut spinner_state = self.spinner_state.clone(); + match self.status { + NodeStatus::Running => { + self.spinner = self + .spinner + .clone() + .throbber_style(Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD)) + .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE) + .use_type(throbber_widgets_tui::WhichUse::Spin); + row_style = Style::default().fg(EUCALYPTUS); + } + 
NodeStatus::Starting => { + self.spinner = self + .spinner + .clone() + .throbber_style(Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD)) + .throbber_set(throbber_widgets_tui::BOX_DRAWING) + .use_type(throbber_widgets_tui::WhichUse::Spin); + } + NodeStatus::Stopped => { + self.spinner = self + .spinner + .clone() + .throbber_style( + Style::default() + .fg(GHOST_WHITE) + .add_modifier(Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE) + .use_type(throbber_widgets_tui::WhichUse::Full); + } + _ => {} + }; + + let row = vec![ + self.name.clone().to_string(), + self.version.to_string(), + format!( + "{}{}", + " ".repeat(ATTOS_WIDTH.saturating_sub(self.attos.to_string().len())), + self.attos.to_string() + ), + format!( + "{}{} MB", + " ".repeat(MEMORY_WIDTH.saturating_sub(self.memory.to_string().len() + 4)), + self.memory.to_string() + ), + format!( + "{}{}", + " ".repeat(MBPS_WIDTH.saturating_sub(self.mbps.to_string().len())), + self.mbps.to_string() + ), + format!( + "{}{}", + " ".repeat(RECORDS_WIDTH.saturating_sub(self.records.to_string().len())), + self.records.to_string() + ), + format!( + "{}{}", + " ".repeat(PEERS_WIDTH.saturating_sub(self.peers.to_string().len())), + self.peers.to_string() + ), + format!( + "{}{}", + " ".repeat(CONNS_WIDTH.saturating_sub(self.connections.to_string().len())), + self.connections.to_string() + ), + self.status.to_string(), + ]; + let throbber_area = Rect::new(area.width - 2, area.y + 2 + index as u16, 1, 1); + + f.render_stateful_widget(self.spinner.clone(), throbber_area, &mut spinner_state); + + Row::new(row).style(row_style) + } +} diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 893523f245..2c3b6205a9 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -1,19 +1,16 @@ -use std::path::PathBuf; - +use crate::action::{Action, StatusActions}; +use crate::connection_mode::ConnectionMode; use color_eyre::eyre::{eyre, Error}; 
+use sn_evm::{EvmNetwork, RewardsAddress}; use sn_node_manager::{ add_services::config::PortRange, config::get_node_registry_path, VerbosityLevel, }; use sn_peers_acquisition::PeersArgs; +use sn_releases::{self, ReleaseType, SafeReleaseRepoActions}; use sn_service_management::NodeRegistry; +use std::{path::PathBuf, str::FromStr}; use tokio::sync::mpsc::UnboundedSender; -use crate::action::{Action, StatusActions}; - -use crate::connection_mode::ConnectionMode; - -use sn_releases::{self, ReleaseType, SafeReleaseRepoActions}; - pub const PORT_MAX: u32 = 65535; pub const PORT_MIN: u32 = 1024; @@ -54,6 +51,7 @@ pub struct MaintainNodesArgs { pub action_sender: UnboundedSender, pub connection_mode: ConnectionMode, pub port_range: Option, + pub rewards_address: String, } /// Maintain the specified number of nodes @@ -175,6 +173,7 @@ struct NodeConfig { data_dir_path: Option, peers_args: PeersArgs, safenode_path: Option, + rewards_address: String, } /// Run the NAT detection process @@ -237,6 +236,7 @@ fn prepare_node_config(args: &MaintainNodesArgs) -> NodeConfig { data_dir_path: args.data_dir_path.clone(), peers_args: args.peers_args.clone(), safenode_path: args.safenode_path.clone(), + rewards_address: args.rewards_address.clone(), } } @@ -291,6 +291,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) { config.data_dir_path.clone(), true, None, + Some(EvmNetwork::ArbitrumSepolia), //FIXME: should come from an UI element. 
config.home_network, false, None, @@ -302,6 +303,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) { None, // We don't care about the port, as we are scaling down config.owner.clone(), config.peers_args.clone(), + RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(), None, None, config.safenode_path.clone(), @@ -364,6 +366,7 @@ async fn add_nodes( config.data_dir_path.clone(), true, None, + Some(EvmNetwork::ArbitrumSepolia), //FIXME: Should come from an UI element config.home_network, false, None, @@ -375,6 +378,7 @@ async fn add_nodes( port_range, config.owner.clone(), config.peers_args.clone(), + RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(), None, None, config.safenode_path.clone(), @@ -410,9 +414,9 @@ async fn add_nodes( { if let Err(err) = action_sender.send(Action::StatusActions( StatusActions::ErrorScalingUpNodes { - raw_error: "When trying to add a node, we failed.\n\n\ - Maybe you ran out of disk space?\n\n\ - Maybe you need to change the port range?\n\n" + raw_error: "When trying to add a node, we failed.\n\ + Maybe you ran out of disk space?\n\ + Maybe you need to change the port range?" 
.to_string(), }, )) { @@ -432,7 +436,9 @@ async fn add_nodes( if let Err(err) = action_sender.send(Action::StatusActions(StatusActions::ErrorScalingUpNodes { raw_error: format!( - "When trying run a node, we reached the maximum amount of retries ({}).", + "When trying run a node, we reached the maximum amount of retries ({}).\n\ + Could this be a firewall blocking nodes starting?\n\ + Or ports on your router already in use?", NODE_ADD_MAX_RETRIES ), })) diff --git a/node-launchpad/src/utils.rs b/node-launchpad/src/utils.rs index ffb997246c..02b6b72fa1 100644 --- a/node-launchpad/src/utils.rs +++ b/node-launchpad/src/utils.rs @@ -14,15 +14,6 @@ use tracing_subscriber::{ self, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, Layer, }; -const VERSION_MESSAGE: &str = concat!( - env!("CARGO_PKG_VERSION"), - "-", - env!("VERGEN_GIT_DESCRIBE"), - " (", - env!("VERGEN_BUILD_DATE"), - ")" -); - pub fn initialize_panic_handler() -> Result<()> { let (panic_hook, eyre_hook) = color_eyre::config::HookBuilder::default() .panic_section(format!( @@ -132,18 +123,3 @@ macro_rules! trace_dbg { trace_dbg!(level: tracing::Level::DEBUG, $ex) }; } - -pub fn version() -> String { - let author = clap::crate_authors!(); - - let data_dir_path = get_launchpad_data_dir_path().unwrap().display().to_string(); - - format!( - "\ -{VERSION_MESSAGE} - -Authors: {author} - -Data directory: {data_dir_path}" - ) -} diff --git a/release-cycle-info b/release-cycle-info index 2b83422132..884a0ad6bf 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -14,5 +14,5 @@ # number for all the released binaries. 
release-year: 2024 release-month: 10 -release-cycle: 2 -release-cycle-counter: 3 +release-cycle: 3 +release-cycle-counter: 2 diff --git a/resources/scripts/bump_version_for_rc.sh b/resources/scripts/bump_version_for_rc.sh index 655345e199..dd5e50303f 100755 --- a/resources/scripts/bump_version_for_rc.sh +++ b/resources/scripts/bump_version_for_rc.sh @@ -80,12 +80,10 @@ done echo "=======================" echo " New Binary Versions " echo "=======================" -echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "autonomi: $(grep "^version" < autonomi-cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" diff --git a/resources/scripts/print-versions.sh b/resources/scripts/print-versions.sh index b2a75fdb49..c3cb26ab6a 100755 --- a/resources/scripts/print-versions.sh +++ b/resources/scripts/print-versions.sh @@ -16,12 +16,10 @@ done echo "===================" echo " Binary Versions " echo "===================" -echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ 
print $3 }' | sed 's/\"//g')" echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "autonomi: $(grep "^version" < autonomi-cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index 675ab2fcd7..f89d345672 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -11,11 +11,9 @@ readme = "README.md" [features] default = [] -local-discovery = [ - "sn_client/local-discovery", - "sn_peers_acquisition/local-discovery", -] +local = ["sn_client/local", "sn_peers_acquisition/local"] network-contacts = ["sn_peers_acquisition/network-contacts"] +nightly = [] open-metrics = ["sn_client/open-metrics"] websockets = ["sn_client/websockets"] svg-dag = ["graphviz-rust", "dag-collection"] @@ -31,9 +29,11 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } sn_client = { path = "../sn_client", version = "0.110.4" } sn_logging = { path = "../sn_logging", version = 
"0.2.36" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_auditor/README.md b/sn_auditor/README.md index e8291f9f3d..1d8f96d59f 100644 --- a/sn_auditor/README.md +++ b/sn_auditor/README.md @@ -13,7 +13,7 @@ Running an auditor instance: cargo run --release --peer "/ip4/" # on a local testnet -cargo run --release --features=local-discovery +cargo run --release --features=local ``` It can be run with the following flags: diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs index 5da0fea62e..8a340d55fe 100644 --- a/sn_auditor/src/main.rs +++ b/sn_auditor/src/main.rs @@ -19,6 +19,7 @@ use dag_db::SpendDagDb; use sn_client::Client; use sn_logging::{Level, LogBuilder, LogFormat, LogOutputDest}; use sn_peers_acquisition::PeersArgs; +use sn_protocol::version::IDENTIFY_PROTOCOL_STR; use std::collections::BTreeSet; use std::path::PathBuf; use tiny_http::{Response, Server}; @@ -27,7 +28,7 @@ use tiny_http::{Response, Server}; const BETA_REWARDS_BACKUP_INTERVAL_SECS: u64 = 20 * 60; #[derive(Parser)] -#[command(author, version, about, long_about = None)] +#[command(disable_version_flag = true)] struct Opt { #[command(flatten)] peers: PeersArgs, @@ -70,14 +71,59 @@ struct Opt { /// discord usernames of the beta participants #[clap(short = 'k', long, value_name = "hex_secret_key")] beta_encryption_key: Option, + + /// Print the crate version. + #[clap(long)] + pub crate_version: bool, + + /// Print the network protocol version. + #[clap(long)] + pub protocol_version: bool, + + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + pub package_version: bool, + + /// Print version information. 
+ #[clap(long)] + version: bool, } #[tokio::main] async fn main() -> Result<()> { let opt = Opt::parse(); + + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Auditor", + env!("CARGO_PKG_VERSION"), + Some(&IDENTIFY_PROTOCOL_STR) + ) + ); + return Ok(()); + } + + if opt.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + + if opt.protocol_version { + println!("{}", *IDENTIFY_PROTOCOL_STR); + return Ok(()); + } + let log_builder = logging_init(opt.log_output_dest, opt.log_format)?; let _log_handles = log_builder.initialize()?; - let beta_participants = load_and_update_beta_participants(opt.beta_participants)?; let maybe_sk = if let Some(sk_str) = opt.beta_encryption_key { @@ -148,7 +194,7 @@ async fn connect_to_network(peers_args: PeersArgs) -> Result { bootstrap_peers.len(), ); let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided + // empty vec is returned if `local` flag is provided None } else { Some(bootstrap_peers) diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 1d255e20a6..de2f27b5cb 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,10 +8,18 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.15" +version = "0.1.16" +build = "build.rs" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } +[features] +nightly = [] + [lints] workspace = true + +[dependencies] +chrono = "0.4" +tracing = { version = "~0.1.26" } diff --git a/sn_build_info/build.rs b/sn_build_info/build.rs index 392c55da4e..7ca807729d 100644 --- a/sn_build_info/build.rs +++ b/sn_build_info/build.rs @@ -5,6 +5,8 @@ // under the GPL Licence is distributed on an "AS IS" 
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use std::fs; +use std::path::Path; use vergen::EmitBuilder; fn main() -> Result<(), Box> { @@ -18,5 +20,47 @@ fn main() -> Result<(), Box> { .git_describe(true, false, None) .emit()?; + let release_info_path = Path::new("../release-cycle-info"); + let contents = + fs::read_to_string(release_info_path).expect("Failed to read release-cycle-info"); + + let mut year = String::new(); + let mut month = String::new(); + let mut cycle = String::new(); + let mut counter = String::new(); + + for line in contents.lines() { + if line.starts_with("release-year:") { + year = line + .split(':') + .nth(1) + .map(|s| s.trim().to_string()) + .unwrap_or_default(); + } else if line.starts_with("release-month:") { + month = line + .split(':') + .nth(1) + .map(|s| s.trim().to_string()) + .unwrap_or_default(); + } else if line.starts_with("release-cycle:") { + cycle = line + .split(':') + .nth(1) + .map(|s| s.trim().to_string()) + .unwrap_or_default(); + } else if line.starts_with("release-cycle-counter:") { + counter = line + .split(':') + .nth(1) + .map(|s| s.trim().to_string()) + .unwrap_or_default(); + } + } + + println!("cargo:rustc-env=RELEASE_YEAR={year}"); + println!("cargo:rustc-env=RELEASE_MONTH={month}"); + println!("cargo:rustc-env=RELEASE_CYCLE={cycle}"); + println!("cargo:rustc-env=RELEASE_CYCLE_COUNTER={counter}"); + Ok(()) } diff --git a/sn_build_info/src/lib.rs b/sn_build_info/src/lib.rs index 6b858254ac..1e270f2a73 100644 --- a/sn_build_info/src/lib.rs +++ b/sn_build_info/src/lib.rs @@ -6,14 +6,15 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use chrono::Utc; +use tracing::debug; + /// Git information separated by slashes: ` / / ` pub const fn git_info() -> &'static str { concat!( - env!("VERGEN_GIT_SHA"), - " / ", env!("VERGEN_GIT_BRANCH"), " / ", - env!("VERGEN_GIT_DESCRIBE"), + env!("VERGEN_GIT_SHA"), " / ", env!("VERGEN_BUILD_DATE") ) @@ -33,3 +34,77 @@ pub const fn git_branch() -> &'static str { pub const fn git_sha() -> &'static str { env!("VERGEN_GIT_SHA") } + +/// Nightly version format: YYYY.MM.DD +pub fn nightly_version() -> String { + let now = Utc::now(); + now.format("%Y.%m.%d").to_string() +} + +/// Git information for nightly builds: ` / / ` +pub fn nightly_git_info() -> String { + format!("{} / {} / {}", nightly_version(), git_branch(), git_sha(),) +} + +pub fn package_version() -> String { + format!( + "{}.{}.{}.{}", + env!("RELEASE_YEAR"), + env!("RELEASE_MONTH"), + env!("RELEASE_CYCLE"), + env!("RELEASE_CYCLE_COUNTER") + ) +} + +pub fn full_version_info( + app_name: &str, + crate_version: &str, + protocol_version: Option<&str>, +) -> String { + let mut info = format!("{app_name} v{crate_version}"); + + if let Some(version) = protocol_version { + info.push_str(&format!("\nNetwork version: {version}")); + } + + info.push_str(&format!( + "\nPackage version: {}\nGit info: {}", + package_version(), + git_info() + )); + + info +} + +pub fn full_nightly_version_info(app_name: &str, protocol_version: Option<&str>) -> String { + let mut info = format!("{app_name} -- Nightly Release {}", nightly_version(),); + if let Some(version) = protocol_version { + info.push_str(&format!("\nNetwork version: {version}")); + } + info.push_str(&format!("\nGit info: {} / {}", git_branch(), git_sha(),)); + info +} + +pub fn version_string( + app_name: &str, + crate_version: &str, + protocol_version: Option<&str>, +) -> String { + if cfg!(feature = "nightly") { + full_nightly_version_info(app_name, protocol_version) + } else { + full_version_info(app_name, crate_version, protocol_version) + } +} + +pub fn 
log_version_info(crate_version: &str, protocol_version: &str) { + if cfg!(feature = "nightly") { + debug!("nightly build info: {}", nightly_git_info()); + debug!("network version: {protocol_version}"); + } else { + debug!("version: {crate_version}"); + debug!("network version: {protocol_version}"); + debug!("package version: {}", package_version()); + debug!("git info: {}", git_info()); + } +} diff --git a/sn_cli/CHANGELOG.md b/sn_cli/CHANGELOG.md deleted file mode 100644 index ddcfd25b77..0000000000 --- a/sn_cli/CHANGELOG.md +++ /dev/null @@ -1,3693 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [0.93.6](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.5...sn_cli-v0.93.6) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(network)* set metrics server to run on localhost - -## [0.93.5](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.4...sn_cli-v0.93.5) - 2024-06-04 - -### Fixed -- *(transfer)* mismatched key shall result in decryption error - -### Other -- *(transfer)* make discord_name decryption backward compatible - -## [0.93.4](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.3...sn_cli-v0.93.4) - 2024-06-04 - -### Other -- *(network)* set metrics server to run on localhost - -## [0.93.3](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.2...sn_cli-v0.93.3) - 2024-06-04 - -### Fixed -- *(faucet)* save the transfer not the cashnote for foundation - -### Other -- *(release)* 
sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20 - -## [0.93.2](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.1...sn_cli-v0.93.2) - 2024-06-03 - -### Fixed -- enable compile time sk setting for faucet/genesis - -## [0.93.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.0...sn_cli-v0.93.1) - 2024-06-03 - -### Other -- bump versions to enable re-release with env vars at compilation - -## [0.93.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.92.0...sn_cli-v0.93.0) - 2024-06-03 - -### Added -- integrate DAG crawling fixes from Josh and Qi -- *(faucet)* write foundation cash note to disk -- *(client)* read existing mnemonic from disk if avilable -- *(networking)* add UPnP metrics -- *(network)* [**breaking**] move network versioning away from sn_protocol -- *(keys)* enable compile or runtime override of keys -- *(launchpad)* use nat detection server to determine the nat status - -### Fixed -- *(networking)* upnp feature gates for metrics -- *(networking)* conditional upnp metrics - -### Other -- *(cli)* showing cli final execution result explicitly -- rename DAG building to crawling -- spend verification error management -- *(networking)* cargo fmt -- use secrets during build process -- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18 - -## [0.92.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.91.4...sn_cli-v0.92.0) - 2024-05-24 - -### Added -- improved spend verification with DAG and fault detection -- upgrade cli audit to use DAG -- remove two uneeded env vars -- pass genesis_cn pub 
fields separate to hide sk -- pass sk_str via cli opt -- *(audit)* collect payment forward statistics -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- hide genesis keypair -- *(node)* use separate keys of Foundation and Royalty -- *(wallet)* ensure genesis wallet attempts to load from local on init first -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- *(audit)* intercept sender of the payment forward -- spend reason enum and sized cipher -- *(metrics)* expose store cost value -- keep track of the estimated network size metric -- record lip2p relay and dctur metrics -- *(node)* periodically forward reward to specific address -- use default keys for genesis, or override -- use different key for payment forward -- hide genesis keypair -- tracking beta rewards from the DAG - -### Fixed -- audit flags activated independently -- reduce blabber in dot and royalties audit mode -- *(cli)* avoid mis-estimation due to overflow -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- *(uploader)* do not error out immediately on max repayment errors -- *(node)* notify fetch completion earlier to avoid being skipped -- avoid 
adding mixed type addresses into RT -- enable libp2p metrics to be captured -- correct genesis_pk naming -- genesis_cn public fields generated from hard coded value -- invalid spend reason in data payments - -### Other -- further improve fast mode gathering speed -- improve cli DAG collection -- improve DAG collection perf -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- improve DAG verification redundancy -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- resolve errors after reverts -- Revert "feat(node): make spend and cash_note reason field configurable" -- Revert "chore: refactor CASH_NOTE_REASON strings to consts" -- Revert "feat(client): dump spends creation_reason statistics" -- Revert "chore: address review comments" -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6 -- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5 -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase 
wording -- *(CI)* upload faucet log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- *(uploader)* return summary when upload fails due to max repayments -- *(uploader)* return the list of max repayment reached items -- remove now unused mostly duplicated code -- *(faucet)* devskim ignore -- *(faucet)* log existing faucet balance if non-zero -- *(faucet)* add foundation PK as const -- *(faucet)* clarify logs for verification -- increase initial faucet balance -- add temp log -- *(faucet)* refresh cashnotes on fund -- devSkim ignore foundation pub temp key -- update got 'gifting-from-genesis' faucet feat -- make open metrics feature default but without starting it by default -- Revert "feat(cli): track spend creation reasons during audit" -- *(node)* tuning the pricing curve -- *(node)* remove un-necessary is_relayed check inside add_potential_candidates -- move historic_quoting_metrics out of the record_store dir -- clippy fixes for open metrics feature -- *(networking)* update tests for pricing curve tweaks -- *(refactor)* stabilise node size to 4k records, -- Revert "chore: rename output reason to purpose for clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- Revert "feat: spend shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.3...sn_cli-v0.91.4) - 2024-05-20 - 
-### Other -- update Cargo.lock dependencies - -## [0.91.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.2...sn_cli-v0.91.3) - 2024-05-15 - -### Other -- update Cargo.lock dependencies - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.1...sn_cli-v0.91.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0...sn_cli-v0.91.1) - 2024-05-08 - -### Other -- update Cargo.lock dependencies -- *(release)* sn_registers-v0.3.13 - -## [0.91.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0-alpha.5...sn_cli-v0.91.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(cli)* track spend creation reasons during audit -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- *(faucet)* log from sn_client -- *(network)* add --upnp flag to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* 
add in autonat server basics -- *(neetworking)* initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs created for -- *(transfers)* do not genereate wallet by default -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- create faucet via account load or generation -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol -- do not add reported external addressese if we are behind home network -- *(networking)* do not add to dialed peers -- *(network)* do not strip out relay's PeerId -- *(relay)* craft the correctly formatted relay address -- *(network)* do not perform AutoNat for 
clients -- *(relay_manager)* do not dial with P2PCircuit protocol -- *(test)* quoting metrics might have live_time field changed along time -- *(node)* avoid false alert on FailedLocalRecord -- *(record_store)* prune only one record at a time -- *(node)* notify replication_fetcher of early completion -- *(node)* fetcher completes on_going_fetch entry on record_key only -- *(node)* not send out replication when failed read from local -- *(networking)* increase the local responsible range of nodes to K_VALUE peers away -- *(network)* clients should not perform farthest relevant record check -- *(node)* replication_fetch keep distance_range sync with record_store -- *(node)* replication_list in range filter -- transfer tests for HotWallet creation -- typo -- *(manager)* do not print to stdout on low verbosity level -- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time - -### Other -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase wording -- *(CI)* upload faucet log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- check DAG crawling performance -- store owner info inside node instead of network -- small cleanup of dead code -- improve naming and typo fix -- clarify client documentation -- clarify client::new description -- clarify client documentation -- clarify client::new description -- cargo fmt -- rename output reason to purpose for clarity -- *(network)* move event handling to its own module -- cleanup network events -- *(network)* remove nat detection via incoming connections check -- enable connection keepalive timeout -- remove non relayed listener id from relay manager -- enable multiple relay connections -- return early if peer is not a node -- *(tryout)* do not add new relay candidates -- add debug 
lines while adding potential relay candidates -- do not remove old non-relayed listeners -- clippy fix -- *(networking)* remove empty file -- *(networking)* re-add global_only -- use quic again -- log listner id -- *(relay)* add candidate even if we are dialing -- remove quic -- cleanup, add in relay server behaviour, and todo -- *(node)* lower some log levels to reduce log size -- *(node)* optimise record_store farthest record calculation -- *(node)* do not reset farthest_acceptance_distance -- *(node)* remove duplicated record_store fullness check -- *(networking)* notify network event on failed put due to prune -- *(networking)* ensure pruned data is indeed further away than kept -- *(CI)* confirm there is no failed replication fetch -- *(networking)* remove circular vec error -- *(node)* unit test for recover historic quoting metrics -- *(node)* pass entire QuotingMetrics into calculate_cost_for_records -- *(node)* extend distance range -- *(transfers)* reduce error size -- *(transfer)* unit tests for PaymentQuote -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 -- *(release)* 
sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0 -- *(release)* sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0 -- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0 -- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0 - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.90.1...sn_cli-v0.90.2) - 2024-03-28 - -### Fixed -- *(cli)* read from cache during initial chunking process -- *(uploader)* do not error out on quote expiry during get store cost - -## [0.90.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.90.0...sn_cli-v0.90.1) - 2024-03-28 - -### Added -- *(uploader)* error out if the quote has expired during get store_cost -- *(uploader)* use WalletApi to prevent loading client wallet during 
each operation -- *(transfers)* implement WalletApi to expose common methods - -### Fixed -- *(uploader)* clarify the use of root and wallet dirs - -### Other -- *(uploader)* update docs - -## [0.90.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.85...sn_cli-v0.90.0) - 2024-03-27 - -### Added -- *(cli)* expose AccountPacket APIs from a lib so it can be used by other apps -- *(uploader)* collect all the uploaded registers -- *(uploader)* allow either chunk or chunk path to be used -- *(uploader)* register existence should be checked before going with payment flow -- *(client)* use the new Uploader instead of FilesUpload -- make logging simpler to use -- [**breaking**] remove gossip code -- svg caching, fault tolerance during DAG collection -- *(uploader)* repay immediately if the quote has expired -- *(uploader)* use ClientRegister instead of Registers -- *(client)* implement a generic uploader with repay ability -- *(transfers)* enable client to check if a quote has expired -- *(client)* make publish register as an associated function -- *(network)* filter out peers when returning store cost -- *(transfers)* [**breaking**] support multiple payments for the same xorname -- use Arc inside Client, Network to reduce clone cost -- *(networking)* add NodeIssue for tracking bad node shunning -- *(faucet)* rate limit based upon wallet locks - -### Fixed -- *(cli)* files should be chunked before checking if the chunks are empty -- *(test)* use tempfile lib instead of stdlib to create temp dirs -- *(clippy)* allow too many arguments as it is a private function -- *(uploader)* remove unused error tracking and allow retries for new payee -- *(uploader)* make the internals more clean -- *(uploader)* update force make payment logic -- *(register)* permissions verification was not being made by some Register APIs -- *(node)* fetching new data shall not cause timed_out immediately -- *(test)* generate unique temp dir to avoid read outdated data -- *(register)* 
shortcut permissions check when anyone can write to Register - -### Other -- *(cli)* moving binary target related files onto src/bin dir -- *(uploader)* remove FilesApi dependency -- *(uploader)* implement UploaderInterface for easier testing -- rename of function to be more descriptive -- remove counter run through several functions and replace with simple counter -- *(register)* minor simplification in Register Permissions implementation -- *(uploader)* remove unused code path when store cost is 0 -- *(uploader)* implement tests to test the basic pipeline logic -- *(uploader)* initial test setup for uploader -- *(uploader)* remove failed_to states -- *(node)* refactor pricing metrics -- lower some networking log levels -- *(node)* loose bad node detection criteria -- *(node)* optimization to reduce logging - -## [0.89.85](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.84...sn_cli-v0.89.85) - 2024-03-21 - -### Added -- *(cli)* have CLI folders cmds to act on current directory by default -- *(folders)* folders APIs to accept an encryption key for metadata chunks -- *(log)* set log levels on the fly -- improve parallelisation with buffered streams -- refactor DAG, improve error management and security -- dag error recording -- *(protocol)* add rpc to set node log level on the fly - -### Other -- *(cli)* adding automated test for metadata chunk encryption -- *(cli)* adding some high-level doc to acc-packet codebase -- *(node)* reduce bad_nodes check resource usage - -## [0.89.84](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.83...sn_cli-v0.89.84) - 2024-03-18 - -### Other -- *(acc-packet)* adding test for acc-packet moved to a different location on disk -- *(acc-packet)* adding unit test for acc-packet changes scanning logic -- *(acc-packet)* adding unit test to private methods/helpers -- *(cli)* breaking up acc-packet logic within its own mod -- name change to spawn events handler -- increase of text length -- iterate upload code 
rearranged for clear readability - -## [0.89.83](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82...sn_cli-v0.89.83) - 2024-03-14 - -### Added -- self in import change -- moved param to outside calc -- refactor spend validation - -### Fixed -- *(cli)* allow to upload chunks from acc-packet using chunked files local cache -- *(cli)* use chunk-mgr with iterator skipping tracking info files - -### Other -- *(acc-packet)* adding verifications to compare tracking info generated on acc-packets cloned -- *(acc-packet)* adding verifications to compare the files/dirs stored on acc-packets cloned -- *(acc-packet)* testing sync empty root dirs -- *(acc-packet)* testing mutations syncing across clones of an acc-packet -- *(acc-packet)* adding automated tests to sn_cli::AccountPacket -- *(cli)* chunk-mgr to report files chunked/uploaded rather than bailing out -- improve code quality -- new `sn_service_management` crate - -## [0.89.82-alpha.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82-alpha.0...sn_cli-v0.89.82-alpha.1) - 2024-03-08 - -### Added -- reference checks -- reference checks -- builder added to estimate -- removal of unnecessary code in upload rs -- remove all use of client in iter uploader - -### Other -- *(folders)* adding automated tests to sn_client::FoldersApi - -## [0.89.81](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.80...sn_cli-v0.89.81) - 2024-03-06 - -### Added -- *(cli)* cmd to initialise a directory as root Folder for storing and syncing on/with network -- *(cli)* pull any Folders changes from network when syncing and merge them to local version -- make sn_cli use sn_client's re-exports -- *(cli)* files download respects filename path -- *(folders)* make payments for local mutations detected before syncing -- *(folders)* build mutations report to be used by status and sync apis -- *(folders)* sync up logic and CLI cmd -- impl iterate uploader self to extract spawn threads -- impl iterate uploader self to 
extract spawn threads -- elevate files api and cm -- refactor upload with iter -- a more clear param for a message function -- split upload and upload with iter -- removal of some messages from vody body -- batch royalties redemption -- collect royalties through DAG -- *(folders)* avoid chunking files when retrieving them with Folders from the network -- *(folders)* store files data-map within Folders metadata chunk -- file to download -- *(folders)* regenerate tracking info when downloading Folders from the network -- *(folders)* realise local changes made to folders/files -- *(folders)* keep track of local changes to Folders - -### Fixed -- *(folders)* set correct change state to folders when scanning -- *(folders)* keep track of root folder sync status - -### Other -- clean swarm commands errs and spend errors -- also add deps features in sn_client -- *(release)* sn_transfers-v0.16.1 -- *(release)* sn_protocol-v0.15.0/sn-node-manager-v0.4.0 -- *(cli)* removing some redundant logic from acc-packet codebase -- *(cli)* minor improvements to acc-packet codebase comments -- rename to iterative upload -- rename to iterative upload -- *(folders)* some simplifications to acc-packet codebase -- *(folders)* minor improvements to folders status report - -## [0.89.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.79...sn_cli-v0.89.80) - 2024-02-23 - -### Added -- file to upload -- estimate refactor - -## [0.89.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.78...sn_cli-v0.89.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## [0.89.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.77...sn_cli-v0.89.78) - 2024-02-20 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.76...sn_cli-v0.89.77) - 2024-02-20 - -### Added -- dependency reconfiguration -- nano to snt -- concurrent estimate without error messages -- 
make data public bool -- removal of the retry strategy -- estimate feature with ci and balance after with fn docs - -## [0.89.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.75...sn_cli-v0.89.76) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.89.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.74...sn_cli-v0.89.75) - 2024-02-20 - -### Added -- spend and DAG utilities - -## [0.89.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.73...sn_cli-v0.89.74) - 2024-02-20 - -### Added -- *(folders)* move folders/files metadata out of Folders entries - -## [0.89.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.72...sn_cli-v0.89.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_registers - -## [0.89.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.71...sn_cli-v0.89.72) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.89.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.70...sn_cli-v0.89.71) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## [0.89.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.69...sn_cli-v0.89.70) - 2024-02-19 - -### Other -- *(cli)* allow to pass files iterator to chunk-mgr and files-upload tools - -## [0.89.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.68...sn_cli-v0.89.69) - 2024-02-15 - -### Added -- *(client)* keep payee as part of storage payment cache - -### Other -- *(release)* sn_networking-v0.13.19/sn_faucet-v0.3.67/sn_client-v0.104.14/sn_node-v0.104.22 - -## [0.89.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.67...sn_cli-v0.89.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## 
[0.89.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.66...sn_cli-v0.89.67) - 2024-02-14 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.65...sn_cli-v0.89.66) - 2024-02-14 - -### Other -- *(refactor)* move mod.rs files the modern way - -## [0.89.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.64...sn_cli-v0.89.65) - 2024-02-13 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.63...sn_cli-v0.89.64) - 2024-02-13 - -### Added -- identify orphans and inconsistencies in the DAG - -### Fixed -- manage the genesis spend case - -## [0.89.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.62...sn_cli-v0.89.63) - 2024-02-12 - -### Other -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.89.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.61...sn_cli-v0.89.62) - 2024-02-12 - -### Added -- *(cli)* single payment for all folders being synced -- *(cli)* adding Folders download CLI cmd -- *(client)* adding Folders sync API and CLI cmd - -### Other -- *(cli)* improvements based on peer review -- *(cli)* adding simple example doc for using Folders cmd -- *(cli)* moving some Folder logic to a private helper function - -## [0.89.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.60...sn_cli-v0.89.61) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.89.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.59...sn_cli-v0.89.60) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.89.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.58...sn_cli-v0.89.59) - 2024-02-09 - -### Other -- update dependencies - -## 
[0.89.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.57...sn_cli-v0.89.58) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.89.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.56...sn_cli-v0.89.57) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.55...sn_cli-v0.89.56) - 2024-02-08 - -### Added -- move the RetryStrategy into protocol and use that during cli upload/download - -### Fixed -- *(bench)* update retry strategy args - -### Other -- *(network)* rename re-attempts to retry strategy - -## [0.89.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.54...sn_cli-v0.89.55) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.53...sn_cli-v0.89.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.52...sn_cli-v0.89.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.51...sn_cli-v0.89.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.50...sn_cli-v0.89.51) - 2024-02-07 - -### Other -- update dependencies - -## [0.89.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.49...sn_cli-v0.89.50) - 2024-02-07 - -### Added -- extendable local state DAG in cli - -## [0.89.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.48...sn_cli-v0.89.49) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.47...sn_cli-v0.89.48) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.46...sn_cli-v0.89.47) - 2024-02-06 - -### Other -- update dependencies - -## 
[0.89.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.45...sn_cli-v0.89.46) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.44...sn_cli-v0.89.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.43...sn_cli-v0.89.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.42...sn_cli-v0.89.43) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.41...sn_cli-v0.89.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.40...sn_cli-v0.89.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.39...sn_cli-v0.89.40) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.38...sn_cli-v0.89.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.37...sn_cli-v0.89.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.36...sn_cli-v0.89.37) - 2024-02-01 - -### Other -- update dependencies - -## [0.89.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.35...sn_cli-v0.89.36) - 2024-02-01 - -### Fixed -- *(cli)* move UploadedFiles creation logic from ChunkManager -- *(cli)* chunk manager to return error if fs operation fails - -### Other -- *(cli)* use 'completed' files everywhere - -## [0.89.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.34...sn_cli-v0.89.35) - 2024-02-01 - -### Other -- update dependencies - -## 
[0.89.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.33...sn_cli-v0.89.34) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.32...sn_cli-v0.89.33) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.31...sn_cli-v0.89.32) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.30...sn_cli-v0.89.31) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.29...sn_cli-v0.89.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.28...sn_cli-v0.89.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.27...sn_cli-v0.89.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.26...sn_cli-v0.89.27) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.25...sn_cli-v0.89.26) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.24...sn_cli-v0.89.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.23...sn_cli-v0.89.24) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.22...sn_cli-v0.89.23) - 2024-01-29 - -### Other -- *(cli)* moving wallet mod into its own mod folder - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.21...sn_cli-v0.89.22) - 2024-01-29 - -### Other -- update dependencies - -## 
[0.89.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.20...sn_cli-v0.89.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.19...sn_cli-v0.89.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.18...sn_cli-v0.89.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.17...sn_cli-v0.89.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.16...sn_cli-v0.89.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.15...sn_cli-v0.89.16) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.14...sn_cli-v0.89.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.13...sn_cli-v0.89.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.12...sn_cli-v0.89.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.11...sn_cli-v0.89.12) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.10...sn_cli-v0.89.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.9...sn_cli-v0.89.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.8...sn_cli-v0.89.9) - 2024-01-23 - -### Other -- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35 - -## 
[0.89.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.7...sn_cli-v0.89.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.6...sn_cli-v0.89.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.5...sn_cli-v0.89.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.4...sn_cli-v0.89.5) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.3...sn_cli-v0.89.4) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.2...sn_cli-v0.89.3) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.1...sn_cli-v0.89.2) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.0...sn_cli-v0.89.1) - 2024-01-17 - -### Other -- update dependencies - -## [0.89.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.22...sn_cli-v0.89.0) - 2024-01-17 - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.88.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.21...sn_cli-v0.88.22) - 2024-01-17 - -### Other -- update dependencies - -## [0.88.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.20...sn_cli-v0.88.21) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.19...sn_cli-v0.88.20) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.18...sn_cli-v0.88.19) - 2024-01-16 - -### Other -- update dependencies - -## 
[0.88.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.17...sn_cli-v0.88.18) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.16...sn_cli-v0.88.17) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.15...sn_cli-v0.88.16) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.14...sn_cli-v0.88.15) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.13...sn_cli-v0.88.14) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.12...sn_cli-v0.88.13) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.11...sn_cli-v0.88.12) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.10...sn_cli-v0.88.11) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.9...sn_cli-v0.88.10) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.8...sn_cli-v0.88.9) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.7...sn_cli-v0.88.8) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.6...sn_cli-v0.88.7) - 2024-01-10 - -### Added -- *(client)* client APIs and CLI cmd to broadcast a transaction signed offline -- *(cli)* new cmd to sign a transaction offline -- *(cli)* new wallet cmd to create a unsigned transaction to be used for offline signing - -### Other -- *(transfers)* 
solving clippy issues about complex fn args - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.5...sn_cli-v0.88.6) - 2024-01-10 - -### Other -- update dependencies - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.4...sn_cli-v0.88.5) - 2024-01-10 - -### Added -- allow register CLI to create a public register writable to anyone - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.3...sn_cli-v0.88.4) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.2...sn_cli-v0.88.3) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.1...sn_cli-v0.88.2) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.0...sn_cli-v0.88.1) - 2024-01-09 - -### Added -- *(cli)* safe wallet create saves new key - -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.87.0...sn_cli-v0.88.0) - 2024-01-08 - -### Added -- provide `--first` argument for `safenode` - -## [0.87.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.103...sn_cli-v0.87.0) - 2024-01-08 - -### Added -- *(cli)* integrate FilesDownload with cli - -### Other -- *(client)* [**breaking**] refactor `Files` into `FilesUpload` - -## [0.86.103](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.102...sn_cli-v0.86.103) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.102](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.101...sn_cli-v0.86.102) - 2024-01-08 - -### Other -- more doc updates to readme files - -## [0.86.101](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.100...sn_cli-v0.86.101) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.100](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.99...sn_cli-v0.86.100) - 2024-01-08 - -### Other 
-- update dependencies - -## [0.86.99](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.98...sn_cli-v0.86.99) - 2024-01-06 - -### Fixed -- *(cli)* read datamap when the xor addr of the file is provided - -## [0.86.98](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.97...sn_cli-v0.86.98) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.97](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.96...sn_cli-v0.86.97) - 2024-01-05 - -### Other -- add clippy unwrap lint to workspace - -## [0.86.96](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.95...sn_cli-v0.86.96) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.95](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.94...sn_cli-v0.86.95) - 2024-01-05 - -### Added -- *(cli)* store uploaded file metadata - -## [0.86.94](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.93...sn_cli-v0.86.94) - 2024-01-05 - -### Other -- *(cli)* error if there is no file to upload - -## [0.86.93](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.92...sn_cli-v0.86.93) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.92](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.91...sn_cli-v0.86.92) - 2024-01-04 - -### Other -- update dependencies - -## [0.86.91](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.90...sn_cli-v0.86.91) - 2024-01-04 - -### Other -- *(cli)* print private data warning -- *(cli)* print the datamap's entire hex addr during first attempt - -## [0.86.90](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.89...sn_cli-v0.86.90) - 2024-01-03 - -### Other -- *(cli)* print the datamap's entire hex addr - -## [0.86.89](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.88...sn_cli-v0.86.89) - 2024-01-03 - -### Added -- *(cli)* keep downloaded files in a safe subdir -- *(client)* clients no longer upload data_map by default - -### Fixed -- *(cli)* write datamap to 
metadata - -### Other -- clippy test fixes and updates -- *(cli)* add not to non-public uploaded files -- refactor for clarity around head_chunk_address -- *(cli)* do not write datamap chunk if non-public - -## [0.86.88](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.87...sn_cli-v0.86.88) - 2024-01-03 - -### Other -- update dependencies - -## [0.86.87](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.86...sn_cli-v0.86.87) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.86](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.85...sn_cli-v0.86.86) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.85](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.84...sn_cli-v0.86.85) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.84](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.83...sn_cli-v0.86.84) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.83](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.82...sn_cli-v0.86.83) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.82](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.81...sn_cli-v0.86.82) - 2023-12-26 - -### Other -- update dependencies - -## [0.86.81](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.80...sn_cli-v0.86.81) - 2023-12-22 - -### Other -- update dependencies - -## [0.86.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.79...sn_cli-v0.86.80) - 2023-12-22 - -### Fixed -- printout un-verified files to alert user - -## [0.86.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.78...sn_cli-v0.86.79) - 2023-12-21 - -### Other -- log full Register address when created in cli and example app - -## [0.86.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.77...sn_cli-v0.86.78) - 2023-12-21 - -### Other -- *(client)* emit chunk Uploaded event if a chunk was verified during repayment - -## 
[0.86.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.76...sn_cli-v0.86.77) - 2023-12-20 - -### Other -- reduce default batch size - -## [0.86.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.75...sn_cli-v0.86.76) - 2023-12-19 - -### Added -- network royalties through audit POC - -## [0.86.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.74...sn_cli-v0.86.75) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.73...sn_cli-v0.86.74) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.72...sn_cli-v0.86.73) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.71...sn_cli-v0.86.72) - 2023-12-19 - -### Fixed -- *(cli)* mark chunk completion as soon as we upload each chunk - -## [0.86.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.70...sn_cli-v0.86.71) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.69...sn_cli-v0.86.70) - 2023-12-18 - -### Added -- *(cli)* random shuffle upload chunks to allow clients co-operation - -## [0.86.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.68...sn_cli-v0.86.69) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.67...sn_cli-v0.86.68) - 2023-12-18 - -### Added -- *(client)* update the Files config via setters -- *(client)* track the upload stats inside Files -- *(client)* move upload retry logic from CLI to client - -### Other -- *(client)* add docs to the Files struct -- *(cli)* use the new client Files api to upload chunks - -## [0.86.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.66...sn_cli-v0.86.67) - 2023-12-14 - -### Other -- update dependencies - -## 
[0.86.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.65...sn_cli-v0.86.66) - 2023-12-14 - -### Other -- update dependencies - -## [0.86.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.64...sn_cli-v0.86.65) - 2023-12-14 - -### Other -- *(cli)* make upload summary printout clearer - -## [0.86.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.63...sn_cli-v0.86.64) - 2023-12-14 - -### Other -- *(cli)* make sequential payment fail limit a const - -## [0.86.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.62...sn_cli-v0.86.63) - 2023-12-14 - -### Other -- *(cli)* make wallet address easy to copy -- *(cli)* peer list is not printed to stdout - -## [0.86.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.61...sn_cli-v0.86.62) - 2023-12-14 - -### Added -- *(cli)* cli arg for controlling chunk retries -- *(cli)* simple retry mechanism for remaining chunks - -### Other -- prevent retries on ci runs w/ '-r 0' - -## [0.86.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.60...sn_cli-v0.86.61) - 2023-12-13 - -### Other -- *(cli)* refactor upload_files - -## [0.86.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.59...sn_cli-v0.86.60) - 2023-12-13 - -### Other -- update dependencies - -## [0.86.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.58...sn_cli-v0.86.59) - 2023-12-13 - -### Added -- *(cli)* download path is familiar to users - -## [0.86.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.57...sn_cli-v0.86.58) - 2023-12-13 - -### Added -- audit DAG collection and visualization -- cli double spends audit from genesis - -## [0.86.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.56...sn_cli-v0.86.57) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.55...sn_cli-v0.86.56) - 2023-12-12 - -### Added -- *(cli)* skip payment and upload for 
existing chunks - -## [0.86.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.54...sn_cli-v0.86.55) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.53...sn_cli-v0.86.54) - 2023-12-12 - -### Added -- constant uploading across batches - -### Fixed -- *(cli)* remove chunk_manager clone that is unsafe - -### Other -- *(networking)* add replication logs -- *(networking)* solidify REPLICATION_RANGE use. exclude self_peer_id in some calcs -- *(cli)* bail early on any payment errors -- *(cli)* only report uploaded files if no errors - -## [0.86.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.52...sn_cli-v0.86.53) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.51...sn_cli-v0.86.52) - 2023-12-11 - -### Other -- update dependencies - -## [0.86.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.50...sn_cli-v0.86.51) - 2023-12-11 - -### Other -- *(cli)* ux improvements after upload completes - -## [0.86.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.49...sn_cli-v0.86.50) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.48...sn_cli-v0.86.49) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.47...sn_cli-v0.86.48) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.46...sn_cli-v0.86.47) - 2023-12-07 - -### Other -- update dependencies - -## [0.86.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.45...sn_cli-v0.86.46) - 2023-12-06 - -### Other -- update dependencies - -## [0.86.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.44...sn_cli-v0.86.45) - 2023-12-06 - -### Other -- update dependencies - -## 
[0.86.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.43...sn_cli-v0.86.44) - 2023-12-06 - -### Added -- *(cli)* enable gossipsub for client when wallet cmd requires it -- *(wallet)* basic impl of a watch-only wallet API - -### Other -- *(wallet)* major refactoring removing redundant and unused code -- *(cli)* Fix duplicate use of 'n' short flag -- *(cli)* All --name flags have short 'n' flag - -## [0.86.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.42...sn_cli-v0.86.43) - 2023-12-06 - -### Other -- remove some needless cloning -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.86.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.41...sn_cli-v0.86.42) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.40...sn_cli-v0.86.41) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.39...sn_cli-v0.86.40) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.38...sn_cli-v0.86.39) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.37...sn_cli-v0.86.38) - 2023-12-05 - -### Added -- allow for cli chunk put retries for un verifiable chunks - -### Fixed -- mark chunks as completed when no failures on retry - -## [0.86.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.36...sn_cli-v0.86.37) - 2023-12-05 - -### Other -- *(cli)* print the failed uploads stats -- *(cli)* remove unpaid/paid distinction from chunk manager - -## [0.86.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.35...sn_cli-v0.86.36) - 2023-12-05 - -### Other -- *(networking)* remove triggered bootstrap slowdown - -## 
[0.86.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.34...sn_cli-v0.86.35) - 2023-12-04 - -### Other -- update dependencies - -## [0.86.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.33...sn_cli-v0.86.34) - 2023-12-01 - -### Other -- *(ci)* fix CI build cache parsing error - -## [0.86.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.32...sn_cli-v0.86.33) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.31...sn_cli-v0.86.32) - 2023-11-29 - -### Added -- most of nodes not subscribe to royalty_transfer topic - -## [0.86.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.30...sn_cli-v0.86.31) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.29...sn_cli-v0.86.30) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.28...sn_cli-v0.86.29) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.27...sn_cli-v0.86.28) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.26...sn_cli-v0.86.27) - 2023-11-29 - -### Added -- verify all the way to genesis -- verify spends through the cli - -### Fixed -- genesis check security flaw - -## [0.86.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.25...sn_cli-v0.86.26) - 2023-11-28 - -### Added -- *(cli)* serialise chunks metadata on disk with MsgPack instead of bincode -- *(royalties)* serialise royalties notifs with MsgPack instead of bincode - -## [0.86.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.24...sn_cli-v0.86.25) - 2023-11-28 - -### Other -- update dependencies - -## 
[0.86.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.23...sn_cli-v0.86.24) - 2023-11-28 - -### Other -- update dependencies - -## [0.86.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.22...sn_cli-v0.86.23) - 2023-11-27 - -### Other -- update dependencies - -## [0.86.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.21...sn_cli-v0.86.22) - 2023-11-24 - -### Added -- *(cli)* peers displayed as list - -## [0.86.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.20...sn_cli-v0.86.21) - 2023-11-24 - -### Other -- update dependencies - -## [0.86.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.19...sn_cli-v0.86.20) - 2023-11-23 - -### Added -- record put retry even when not verifying -- retry at the record level, remove all other retries, report errors - -### Other -- appease clippy -- fix tests compilation - -## [0.86.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.18...sn_cli-v0.86.19) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.17...sn_cli-v0.86.18) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.16...sn_cli-v0.86.17) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.15...sn_cli-v0.86.16) - 2023-11-22 - -### Other -- update dependencies - -## [0.86.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.14...sn_cli-v0.86.15) - 2023-11-22 - -### Added -- *(cli)* add download batch-size option - -## [0.86.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.13...sn_cli-v0.86.14) - 2023-11-22 - -### Other -- update dependencies - -## [0.86.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.12...sn_cli-v0.86.13) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - 
-## [0.86.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.11...sn_cli-v0.86.12) - 2023-11-21 - -### Other -- update dependencies - -## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.10...sn_cli-v0.86.11) - 2023-11-20 - -### Other -- increase default batch size - -## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.9...sn_cli-v0.86.10) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.8...sn_cli-v0.86.9) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.7...sn_cli-v0.86.8) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.6...sn_cli-v0.86.7) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.5...sn_cli-v0.86.6) - 2023-11-20 - -### Fixed -- use actual quote instead of dummy - -## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.4...sn_cli-v0.86.5) - 2023-11-17 - -### Other -- update dependencies - -## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.3...sn_cli-v0.86.4) - 2023-11-17 - -### Other -- update dependencies - -## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.2...sn_cli-v0.86.3) - 2023-11-16 - -### Other -- update dependencies - -## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.1...sn_cli-v0.86.2) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.0...sn_cli-v0.86.1) - 2023-11-15 - -### Other -- update dependencies - -## [0.86.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.20...sn_cli-v0.86.0) - 2023-11-15 - -### Added -- *(client)* [**breaking**] error out if we cannot connect to the network 
in - -### Other -- *(client)* [**breaking**] remove request_response timeout argument - -## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.19...sn_cli-v0.85.20) - 2023-11-15 - -### Added -- *(royalties)* make royalties payment to be 15% of the total storage cost -- *(protocol)* move test utils behind a feature gate - -## [0.85.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.18...sn_cli-v0.85.19) - 2023-11-14 - -### Other -- *(royalties)* verify royalties fees amounts - -## [0.85.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.17...sn_cli-v0.85.18) - 2023-11-14 - -### Other -- update dependencies - -## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.16...sn_cli-v0.85.17) - 2023-11-14 - -### Other -- update dependencies - -## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.15...sn_cli-v0.85.16) - 2023-11-14 - -### Fixed -- *(cli)* marking chunks as verified should mark them as paid too - -## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.14...sn_cli-v0.85.15) - 2023-11-14 - -### Fixed -- *(cli)* repay unpaid chunks due to transfer failures - -## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.13...sn_cli-v0.85.14) - 2023-11-13 - -### Fixed -- *(cli)* failed to move chunk path shall not get deleted - -## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.12...sn_cli-v0.85.13) - 2023-11-13 - -### Fixed -- avoid infinite looping on verification during upload - -## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.11...sn_cli-v0.85.12) - 2023-11-13 - -### Other -- update dependencies - -## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.10...sn_cli-v0.85.11) - 2023-11-13 - -### Other -- *(cli)* disable silent ignoring of wallet errors - -## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.9...sn_cli-v0.85.10) - 2023-11-10 - 
-### Added -- *(cli)* attempt to reload wallet from disk if storing it fails when receiving transfers online -- *(cli)* new cmd to listen to royalties payments and deposit them into a local wallet - -### Other -- *(cli)* minor improvement to help docs - -## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.8...sn_cli-v0.85.9) - 2023-11-10 - -### Other -- update dependencies - -## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.7...sn_cli-v0.85.8) - 2023-11-09 - -### Other -- update dependencies - -## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.6...sn_cli-v0.85.7) - 2023-11-09 - -### Other -- update dependencies - -## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.5...sn_cli-v0.85.6) - 2023-11-09 - -### Added -- increase retry count for chunk put -- chunk put retry taking repayment into account - -### Other -- const instead of magic num in code for wait time -- please ci - -## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.4...sn_cli-v0.85.5) - 2023-11-08 - -### Other -- update dependencies - -## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.3...sn_cli-v0.85.4) - 2023-11-08 - -### Fixed -- *(bench)* update benchmark to account for de-duplicated files - -## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.2...sn_cli-v0.85.3) - 2023-11-08 - -### Other -- update dependencies - -## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.1...sn_cli-v0.85.2) - 2023-11-07 - -### Other -- update dependencies - -## [0.85.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.0...sn_cli-v0.85.1) - 2023-11-07 - -### Other -- update dependencies - -## [0.85.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.51...sn_cli-v0.85.0) - 2023-11-07 - -### Added -- *(cli)* store paid and unpaid chunks separately -- *(cli)* use ChunkManager during the upload process -- *(cli)* implement 
ChunkManager to re-use already chunked files - -### Fixed -- *(cli)* keep track of files that have been completely uploaded -- *(cli)* get bytes from OsStr by first converting it into lossy string -- *(client)* [**breaking**] make `Files::chunk_file` into an associated function -- *(upload)* don't ignore file if filename cannot be converted from OsString to String - -### Other -- rename test function and spell correction -- *(cli)* add more tests to chunk manager for unpaid paid dir refactor -- *(cli)* add some docs to ChunkManager -- *(cli)* add tests for `ChunkManager` -- *(cli)* move chunk management to its own module - -## [0.84.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.50...sn_cli-v0.84.51) - 2023-11-07 - -### Other -- update dependencies - -## [0.84.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.49...sn_cli-v0.84.50) - 2023-11-07 - -### Other -- update dependencies - -## [0.84.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.48...sn_cli-v0.84.49) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.47...sn_cli-v0.84.48) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.46...sn_cli-v0.84.47) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.45...sn_cli-v0.84.46) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.44...sn_cli-v0.84.45) - 2023-11-06 - -### Added -- *(deps)* upgrade libp2p to 0.53 - -## [0.84.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.43...sn_cli-v0.84.44) - 2023-11-03 - -### Other -- *(cli)* make file upload output cut n paste friendly - -## [0.84.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.42...sn_cli-v0.84.43) - 2023-11-03 - -### Other -- 
update dependencies - -## [0.84.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.41...sn_cli-v0.84.42) - 2023-11-02 - -### Other -- update dependencies - -## [0.84.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.40...sn_cli-v0.84.41) - 2023-11-02 - -### Other -- update dependencies - -## [0.84.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.39...sn_cli-v0.84.40) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.38...sn_cli-v0.84.39) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.37...sn_cli-v0.84.38) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.36...sn_cli-v0.84.37) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.35...sn_cli-v0.84.36) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.34...sn_cli-v0.84.35) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.33...sn_cli-v0.84.34) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.32...sn_cli-v0.84.33) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.31...sn_cli-v0.84.32) - 2023-10-30 - -### Other -- update dependencies - -## [0.84.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.30...sn_cli-v0.84.31) - 2023-10-30 - -### Added -- *(cli)* error out if empty wallet -- *(cli)* error out if we do not have enough balance - -## [0.84.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.29...sn_cli-v0.84.30) - 2023-10-30 - -### 
Other -- update dependencies - -## [0.84.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.28...sn_cli-v0.84.29) - 2023-10-30 - -### Other -- *(node)* use Bytes for Gossip related data types -- *(release)* sn_client-v0.95.11/sn_protocol-v0.8.7/sn_transfers-v0.14.8/sn_networking-v0.9.10 - -## [0.84.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.27...sn_cli-v0.84.28) - 2023-10-27 - -### Other -- update dependencies - -## [0.84.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.26...sn_cli-v0.84.27) - 2023-10-27 - -### Other -- update dependencies - -## [0.84.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.25...sn_cli-v0.84.26) - 2023-10-27 - -### Added -- *(cli)* verify as we upload when 1 batch - -## [0.84.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.24...sn_cli-v0.84.25) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.23...sn_cli-v0.84.24) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.22...sn_cli-v0.84.23) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.21...sn_cli-v0.84.22) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.20...sn_cli-v0.84.21) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.19...sn_cli-v0.84.20) - 2023-10-25 - -### Added -- *(cli)* chunk files in parallel - -### Fixed -- *(cli)* remove Arc from ProgressBar as it is Arc internally - -### Other -- *(cli)* add logs to indicate the time spent on chunking the files - -## [0.84.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.18...sn_cli-v0.84.19) - 2023-10-24 - -### Added -- *(cli)* wallet deposit cmd with no 
arg was not reading cash notes from disk -- *(cli)* new wallet create cmd allowing users to create a wallet from a given secret key - -## [0.84.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.17...sn_cli-v0.84.18) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.16...sn_cli-v0.84.17) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.15...sn_cli-v0.84.16) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.14...sn_cli-v0.84.15) - 2023-10-24 - -### Added -- *(log)* use LogBuilder to initialize logging - -### Other -- *(client)* log and wait tweaks - -## [0.84.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.13...sn_cli-v0.84.14) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.12...sn_cli-v0.84.13) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.11...sn_cli-v0.84.12) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.10...sn_cli-v0.84.11) - 2023-10-23 - -### Fixed -- *(cli)* don't bail if a payment was not found during verify/repayment - -## [0.84.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.9...sn_cli-v0.84.10) - 2023-10-23 - -### Other -- more custom debug and debug skips - -## [0.84.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.8...sn_cli-v0.84.9) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.7...sn_cli-v0.84.8) - 2023-10-22 - -### Other -- update dependencies - -## [0.84.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.6...sn_cli-v0.84.7) - 
2023-10-21 - -### Other -- update dependencies - -## [0.84.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.5...sn_cli-v0.84.6) - 2023-10-20 - -### Other -- update dependencies - -## [0.84.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.4...sn_cli-v0.84.5) - 2023-10-20 - -### Other -- update dependencies - -## [0.84.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.3...sn_cli-v0.84.4) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.2...sn_cli-v0.84.3) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.1...sn_cli-v0.84.2) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.0...sn_cli-v0.84.1) - 2023-10-18 - -### Other -- update dependencies - -## [0.84.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.52...sn_cli-v0.84.0) - 2023-10-18 - -### Added -- *(client)* verify register uploads and retry and repay if failed - -### Other -- *(client)* always validate storage payments -- repay for data in node rewards tests - -## [0.83.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.51...sn_cli-v0.83.52) - 2023-10-18 - -### Other -- update dependencies - -## [0.83.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.50...sn_cli-v0.83.51) - 2023-10-17 - -### Other -- update dependencies - -## [0.83.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.49...sn_cli-v0.83.50) - 2023-10-16 - -### Other -- update dependencies - -## [0.83.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.48...sn_cli-v0.83.49) - 2023-10-16 - -### Other -- update dependencies - -## [0.83.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.47...sn_cli-v0.83.48) - 2023-10-13 - -### Other -- update dependencies - -## 
[0.83.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.46...sn_cli-v0.83.47) - 2023-10-13 - -### Other -- update dependencies - -## [0.83.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.45...sn_cli-v0.83.46) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.44...sn_cli-v0.83.45) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.43...sn_cli-v0.83.44) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.42...sn_cli-v0.83.43) - 2023-10-11 - -### Fixed -- expose RecordMismatch errors and cleanup wallet if we hit that - -### Other -- *(docs)* cleanup comments and docs - -## [0.83.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.41...sn_cli-v0.83.42) - 2023-10-11 - -### Other -- update dependencies - -## [0.83.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.40...sn_cli-v0.83.41) - 2023-10-11 - -### Fixed -- make client handle payment error - -## [0.83.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.39...sn_cli-v0.83.40) - 2023-10-11 - -### Added -- showing expected holders to CLI when required - -## [0.83.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.38...sn_cli-v0.83.39) - 2023-10-11 - -### Other -- update dependencies - -## [0.83.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.37...sn_cli-v0.83.38) - 2023-10-10 - -### Added -- *(transfer)* special event for transfer notifs over gossipsub - -## [0.83.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.36...sn_cli-v0.83.37) - 2023-10-10 - -### Other -- update dependencies - -## [0.83.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.35...sn_cli-v0.83.36) - 2023-10-10 - -### Other -- update dependencies - -## 
[0.83.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.34...sn_cli-v0.83.35) - 2023-10-10 - -### Other -- update dependencies - -## [0.83.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.33...sn_cli-v0.83.34) - 2023-10-09 - -### Other -- update dependencies - -## [0.83.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.32...sn_cli-v0.83.33) - 2023-10-09 - -### Added -- ensure temp SE chunks got cleaned after uploading - -## [0.83.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.31...sn_cli-v0.83.32) - 2023-10-08 - -### Other -- update dependencies - -## [0.83.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.30...sn_cli-v0.83.31) - 2023-10-06 - -### Added -- feat!(sn_transfers): unify store api for wallet - -### Other -- remove deposit vs received cashnote distinction - -## [0.83.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.29...sn_cli-v0.83.30) - 2023-10-06 - -### Other -- *(cli)* reuse the client::send function to send amount from wallet - -## [0.83.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.28...sn_cli-v0.83.29) - 2023-10-06 - -### Other -- update dependencies - -## [0.83.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.27...sn_cli-v0.83.28) - 2023-10-06 - -### Other -- update dependencies - -## [0.83.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.26...sn_cli-v0.83.27) - 2023-10-05 - -### Added -- *(metrics)* enable node monitoring through dockerized grafana instance - -## [0.83.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.25...sn_cli-v0.83.26) - 2023-10-05 - -### Added -- feat!(cli): remove concurrency argument - -### Fixed -- *(client)* remove concurrency limitations - -## [0.83.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.24...sn_cli-v0.83.25) - 2023-10-05 - -### Fixed -- *(sn_transfers)* be sure we store CashNotes before writing the wallet file - -## 
[0.83.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.23...sn_cli-v0.83.24) - 2023-10-05 - -### Fixed -- use specific verify func for chunk stored verification - -## [0.83.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.22...sn_cli-v0.83.23) - 2023-10-05 - -### Added -- use progress bars on `files upload` - -### Other -- use one files api and clarify variable names -- pay_for_chunks returns cost and new balance - -## [0.83.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.21...sn_cli-v0.83.22) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.20...sn_cli-v0.83.21) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.19...sn_cli-v0.83.20) - 2023-10-04 - -### Added -- *(client)* log the command invoked for safe - -## [0.83.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.18...sn_cli-v0.83.19) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.17...sn_cli-v0.83.18) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.16...sn_cli-v0.83.17) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.15...sn_cli-v0.83.16) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.14...sn_cli-v0.83.15) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.13...sn_cli-v0.83.14) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.12...sn_cli-v0.83.13) - 2023-10-03 - -### Other -- update dependencies - -## 
[0.83.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.11...sn_cli-v0.83.12) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.10...sn_cli-v0.83.11) - 2023-10-02 - -### Added -- add read transfer from file option -- faucet using transfers instead of sending raw cashnotes - -### Other -- trim transfer hex nl and spaces -- add some more error info printing - -## [0.83.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.9...sn_cli-v0.83.10) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.8...sn_cli-v0.83.9) - 2023-10-02 - -### Added -- *(client)* show feedback on long wait for costs - -## [0.83.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.7...sn_cli-v0.83.8) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.6...sn_cli-v0.83.7) - 2023-09-29 - -### Other -- update dependencies - -## [0.83.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.5...sn_cli-v0.83.6) - 2023-09-29 - -### Fixed -- *(cli)* dont bail on errors during repay/upload - -## [0.83.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.4...sn_cli-v0.83.5) - 2023-09-29 - -### Fixed -- *(client)* just skip empty files - -## [0.83.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.3...sn_cli-v0.83.4) - 2023-09-28 - -### Added -- client to client transfers - -## [0.83.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.2...sn_cli-v0.83.3) - 2023-09-27 - -### Added -- *(networking)* remove optional_semaphore being passed down from apps - -## [0.83.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.1...sn_cli-v0.83.2) - 2023-09-27 - -### Other -- update dependencies - -## [0.83.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.0...sn_cli-v0.83.1) - 
2023-09-27 - -### Added -- *(logging)* set default log levels to be more verbose -- *(logging)* set default logging to data-dir - -### Other -- *(client)* add timestamp to client log path - -## [0.83.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.8...sn_cli-v0.83.0) - 2023-09-27 - -### Added -- deep clean sn_transfers, reduce exposition, remove dead code - -## [0.82.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.7...sn_cli-v0.82.8) - 2023-09-26 - -### Other -- update dependencies - -## [0.82.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.6...sn_cli-v0.82.7) - 2023-09-26 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics - -## [0.82.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.5...sn_cli-v0.82.6) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.4...sn_cli-v0.82.5) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.3...sn_cli-v0.82.4) - 2023-09-25 - -### Added -- *(cli)* wrap repayment error for clarity - -## [0.82.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.2...sn_cli-v0.82.3) - 2023-09-25 - -### Added -- *(peers)* use a common way to bootstrap into the network for all the bins -- *(cli)* fetch network contacts for the provided network name -- *(cli)* fetch bootstrap peers from network contacts - -### Other -- more logs around parsing network-contacts -- *(cli)* feature gate network contacts and fetch from URL - -## [0.82.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.1...sn_cli-v0.82.2) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.0...sn_cli-v0.82.1) - 2023-09-22 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC services to 
pub/sub to gossipsub topics - -## [0.82.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.64...sn_cli-v0.82.0) - 2023-09-22 - -### Added -- *(cli)* deps update and arbitrary change for cli - -## [0.81.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.63...sn_cli-v0.81.64) - 2023-09-21 - -### Added -- provide a `files ls` command - -### Other -- *(release)* sn_client-v0.89.22 -- store uploaded files list as text -- clarify `files download` usage -- output address of uploaded file - -## [0.81.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.62...sn_cli-v0.81.63) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.61...sn_cli-v0.81.62) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.60...sn_cli-v0.81.61) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.59...sn_cli-v0.81.60) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.58...sn_cli-v0.81.59) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.57...sn_cli-v0.81.58) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.56...sn_cli-v0.81.57) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.55...sn_cli-v0.81.56) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.54...sn_cli-v0.81.55) - 2023-09-20 - -### Fixed -- make clearer cli send asks for whole token amounts, not nanos - -## [0.81.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.53...sn_cli-v0.81.54) - 
2023-09-20 - -### Other -- update dependencies - -## [0.81.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.52...sn_cli-v0.81.53) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.51...sn_cli-v0.81.52) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.50...sn_cli-v0.81.51) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.49...sn_cli-v0.81.50) - 2023-09-19 - -### Other -- error handling when failed fetch store cost - -## [0.81.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.48...sn_cli-v0.81.49) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.47...sn_cli-v0.81.48) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.46...sn_cli-v0.81.47) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.45...sn_cli-v0.81.46) - 2023-09-18 - -### Fixed -- avoid verification too close to put; remove un-necessary wait for put - -## [0.81.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.44...sn_cli-v0.81.45) - 2023-09-18 - -### Other -- some cleanups within the upload procedure - -## [0.81.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.43...sn_cli-v0.81.44) - 2023-09-18 - -### Other -- update dependencies - -## [0.81.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.42...sn_cli-v0.81.43) - 2023-09-18 - -### Fixed -- *(cli)* repay and upload after verifying all the chunks - -### Other -- *(cli)* use iter::chunks() API to batch and pay for our chunks - -## [0.81.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.41...sn_cli-v0.81.42) - 
2023-09-15 - -### Added -- *(client)* pay for chunks in batches - -### Other -- *(cli)* move 'chunk_path' to files.rs -- *(client)* refactor chunk upload code to allow greater concurrency - -## [0.81.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.40...sn_cli-v0.81.41) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.39...sn_cli-v0.81.40) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.38...sn_cli-v0.81.39) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.37...sn_cli-v0.81.38) - 2023-09-14 - -### Other -- update dependencies - -## [0.81.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.36...sn_cli-v0.81.37) - 2023-09-14 - -### Added -- expose batch_size to cli -- split upload procedure into batches - -## [0.81.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.35...sn_cli-v0.81.36) - 2023-09-14 - -### Other -- *(metrics)* rename feature flag and small fixes - -## [0.81.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.34...sn_cli-v0.81.35) - 2023-09-13 - -### Added -- *(register)* paying nodes for Register storage - -## [0.81.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.33...sn_cli-v0.81.34) - 2023-09-12 - -### Added -- utilize stream decryptor - -## [0.81.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.32...sn_cli-v0.81.33) - 2023-09-12 - -### Other -- update dependencies - -## [0.81.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.31...sn_cli-v0.81.32) - 2023-09-12 - -### Other -- *(metrics)* rename network metrics and remove from default features list - -## [0.81.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.30...sn_cli-v0.81.31) - 2023-09-12 - -### Added -- add tx and parent spends 
verification -- chunk payments using UTXOs instead of DBCs - -### Other -- use updated sn_dbc - -## [0.81.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.29...sn_cli-v0.81.30) - 2023-09-11 - -### Other -- update dependencies - -## [0.81.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.28...sn_cli-v0.81.29) - 2023-09-11 - -### Other -- utilize stream encryptor - -## [0.81.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.27...sn_cli-v0.81.28) - 2023-09-11 - -### Other -- update dependencies - -## [0.81.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.26...sn_cli-v0.81.27) - 2023-09-08 - -### Added -- *(client)* repay for chunks if they cannot be validated - -### Fixed -- *(client)* dont bail on failed upload before verify/repay - -### Other -- *(client)* refactor to have permits at network layer -- *(refactor)* remove wallet_client args from upload flow -- *(refactor)* remove upload_chunks semaphore arg - -## [0.81.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.25...sn_cli-v0.81.26) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.24...sn_cli-v0.81.25) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.23...sn_cli-v0.81.24) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.22...sn_cli-v0.81.23) - 2023-09-06 - -### Other -- update dependencies - -## [0.81.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.21...sn_cli-v0.81.22) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.20...sn_cli-v0.81.21) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.19...sn_cli-v0.81.20) - 2023-09-05 - -### 
Other -- update dependencies - -## [0.81.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.18...sn_cli-v0.81.19) - 2023-09-05 - -### Added -- *(cli)* properly init color_eyre, advise on hex parse fail - -## [0.81.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.17...sn_cli-v0.81.18) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.16...sn_cli-v0.81.17) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.15...sn_cli-v0.81.16) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.14...sn_cli-v0.81.15) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.13...sn_cli-v0.81.14) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.12...sn_cli-v0.81.13) - 2023-09-02 - -### Other -- update dependencies - -## [0.81.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.11...sn_cli-v0.81.12) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.10...sn_cli-v0.81.11) - 2023-09-01 - -### Other -- *(cli)* better formatting for elapsed time statements -- *(transfers)* store dbcs by ref to avoid more clones - -## [0.81.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.9...sn_cli-v0.81.10) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.8...sn_cli-v0.81.9) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.7...sn_cli-v0.81.8) - 2023-08-31 - -### Added -- *(cli)* perform wallet actions without connecting to the network - -### Other -- 
remove unused async - -## [0.81.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.6...sn_cli-v0.81.7) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.5...sn_cli-v0.81.6) - 2023-08-31 - -### Added -- *(cli)* wallet cmd flag enabling to query a node's local wallet balance - -### Fixed -- *(cli)* don't try to create wallet paths when checking balance - -## [0.81.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.4...sn_cli-v0.81.5) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.3...sn_cli-v0.81.4) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.2...sn_cli-v0.81.3) - 2023-08-31 - -### Fixed -- correct bench download calculation - -## [0.81.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.1...sn_cli-v0.81.2) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.0...sn_cli-v0.81.1) - 2023-08-31 - -### Added -- *(cli)* expose 'concurrency' flag -- *(cli)* increase put parallelisation - -### Other -- *(client)* improve download concurrency. 
- -## [0.81.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.64...sn_cli-v0.81.0) - 2023-08-30 - -### Added -- refactor to allow greater upload parallelisation -- one transfer per data set, mapped dbcs to content addrs -- [**breaking**] pay each chunk holder direct -- feat!(protocol): get price and pay for each chunk individually -- feat!(protocol): remove chunk merkletree to simplify payment - -### Fixed -- *(tokio)* remove tokio fs - -### Other -- *(deps)* bump tokio to 1.32.0 -- *(client)* refactor client wallet to reduce dbc clones -- *(client)* pass around content payments map mut ref -- *(client)* reduce transferoutputs cloning - -## [0.80.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.63...sn_cli-v0.80.64) - 2023-08-30 - -### Other -- update dependencies - -## [0.80.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.62...sn_cli-v0.80.63) - 2023-08-30 - -### Other -- update dependencies - -## [0.80.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.61...sn_cli-v0.80.62) - 2023-08-29 - -### Other -- update dependencies - -## [0.80.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.60...sn_cli-v0.80.61) - 2023-08-25 - -### Other -- update dependencies - -## [0.80.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.59...sn_cli-v0.80.60) - 2023-08-24 - -### Other -- *(cli)* verify bench uploads once more - -## [0.80.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.58...sn_cli-v0.80.59) - 2023-08-24 - -### Other -- rust 1.72.0 fixes - -## [0.80.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.57...sn_cli-v0.80.58) - 2023-08-24 - -### Other -- update dependencies - -## [0.80.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.56...sn_cli-v0.80.57) - 2023-08-22 - -### Other -- update dependencies - -## [0.80.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.55...sn_cli-v0.80.56) - 2023-08-22 - -### Fixed -- 
fixes to allow upload file works properly - -## [0.80.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.54...sn_cli-v0.80.55) - 2023-08-21 - -### Other -- update dependencies - -## [0.80.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.53...sn_cli-v0.80.54) - 2023-08-21 - -### Other -- update dependencies - -## [0.80.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.52...sn_cli-v0.80.53) - 2023-08-18 - -### Other -- update dependencies - -## [0.80.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.51...sn_cli-v0.80.52) - 2023-08-18 - -### Other -- update dependencies - -## [0.80.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.50...sn_cli-v0.80.51) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.49...sn_cli-v0.80.50) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.48...sn_cli-v0.80.49) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.47...sn_cli-v0.80.48) - 2023-08-17 - -### Fixed -- avoid download bench result polluted - -### Other -- more client logs - -## [0.80.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.46...sn_cli-v0.80.47) - 2023-08-16 - -### Other -- update dependencies - -## [0.80.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.45...sn_cli-v0.80.46) - 2023-08-16 - -### Other -- update dependencies - -## [0.80.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.44...sn_cli-v0.80.45) - 2023-08-16 - -### Other -- optimize benchmark flow - -## [0.80.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.43...sn_cli-v0.80.44) - 2023-08-15 - -### Other -- update dependencies - -## [0.80.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.42...sn_cli-v0.80.43) - 2023-08-14 - 
-### Other -- update dependencies - -## [0.80.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.41...sn_cli-v0.80.42) - 2023-08-14 - -### Other -- update dependencies - -## [0.80.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.40...sn_cli-v0.80.41) - 2023-08-11 - -### Other -- *(cli)* print cost info - -## [0.80.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.39...sn_cli-v0.80.40) - 2023-08-11 - -### Other -- update dependencies - -## [0.80.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.38...sn_cli-v0.80.39) - 2023-08-10 - -### Other -- update dependencies - -## [0.80.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.37...sn_cli-v0.80.38) - 2023-08-10 - -### Other -- update dependencies - -## [0.80.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.36...sn_cli-v0.80.37) - 2023-08-09 - -### Other -- update dependencies - -## [0.80.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.35...sn_cli-v0.80.36) - 2023-08-08 - -### Fixed -- *(cli)* remove manual faucet claim from benchmarking. 
-- *(node)* prevent panic in storage calcs - -### Other -- *(cli)* get more money for benching -- log bench errors - -## [0.80.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.34...sn_cli-v0.80.35) - 2023-08-07 - -### Other -- update dependencies - -## [0.80.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.33...sn_cli-v0.80.34) - 2023-08-07 - -### Other -- *(node)* dont verify during benchmarks - -## [0.80.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.32...sn_cli-v0.80.33) - 2023-08-07 - -### Added -- rework register addresses to include pk - -### Other -- cleanup comments and names - -## [0.80.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.31...sn_cli-v0.80.32) - 2023-08-07 - -### Other -- update dependencies - -## [0.80.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.30...sn_cli-v0.80.31) - 2023-08-04 - -### Other -- update dependencies - -## [0.80.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.29...sn_cli-v0.80.30) - 2023-08-04 - -### Other -- update dependencies - -## [0.80.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.28...sn_cli-v0.80.29) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.27...sn_cli-v0.80.28) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.26...sn_cli-v0.80.27) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.25...sn_cli-v0.80.26) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.24...sn_cli-v0.80.25) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.23...sn_cli-v0.80.24) - 2023-08-02 - -### Other -- update dependencies - -## 
[0.80.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.22...sn_cli-v0.80.23) - 2023-08-02 - -### Other -- update dependencies - -## [0.80.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.21...sn_cli-v0.80.22) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.20...sn_cli-v0.80.21) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.19...sn_cli-v0.80.20) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.18...sn_cli-v0.80.19) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.17...sn_cli-v0.80.18) - 2023-08-01 - -### Added -- *(cli)* add no-verify flag to cli - -### Other -- *(cli)* update logs and ci for payments - -## [0.80.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.16...sn_cli-v0.80.17) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.15...sn_cli-v0.80.16) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.14...sn_cli-v0.80.15) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.13...sn_cli-v0.80.14) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.12...sn_cli-v0.80.13) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.11...sn_cli-v0.80.12) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.10...sn_cli-v0.80.11) - 2023-07-28 - -### Other -- update 
dependencies - -## [0.80.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.9...sn_cli-v0.80.10) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.8...sn_cli-v0.80.9) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.7...sn_cli-v0.80.8) - 2023-07-27 - -### Other -- update dependencies - -## [0.80.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.6...sn_cli-v0.80.7) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.5...sn_cli-v0.80.6) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.4...sn_cli-v0.80.5) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.3...sn_cli-v0.80.4) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.2...sn_cli-v0.80.3) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.1...sn_cli-v0.80.2) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.0...sn_cli-v0.80.1) - 2023-07-25 - -### Other -- update dependencies - -## [0.80.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.32...sn_cli-v0.80.0) - 2023-07-21 - -### Added -- *(cli)* allow to pass the hex-encoded DBC as arg -- *(protocol)* [**breaking**] make Chunks storage payment required - -## [0.79.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.31...sn_cli-v0.79.32) - 2023-07-20 - -### Other -- update dependencies - -## [0.79.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.30...sn_cli-v0.79.31) - 2023-07-20 - -### Other -- 
update dependencies - -## [0.79.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.29...sn_cli-v0.79.30) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.28...sn_cli-v0.79.29) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.27...sn_cli-v0.79.28) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.26...sn_cli-v0.79.27) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.25...sn_cli-v0.79.26) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.24...sn_cli-v0.79.25) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.23...sn_cli-v0.79.24) - 2023-07-18 - -### Fixed -- client - -## [0.79.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.22...sn_cli-v0.79.23) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.21...sn_cli-v0.79.22) - 2023-07-17 - -### Fixed -- *(cli)* add more context when failing to decode a wallet - -## [0.79.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.20...sn_cli-v0.79.21) - 2023-07-17 - -### Other -- update dependencies - -## [0.79.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.19...sn_cli-v0.79.20) - 2023-07-17 - -### Added -- *(networking)* upgrade to libp2p 0.52.0 - -## [0.79.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.18...sn_cli-v0.79.19) - 2023-07-17 - -### Added -- *(client)* keep storage payment proofs in local wallet - -## [0.79.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.17...sn_cli-v0.79.18) - 2023-07-13 
- -### Other -- update dependencies - -## [0.79.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.16...sn_cli-v0.79.17) - 2023-07-13 - -### Other -- update dependencies - -## [0.79.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.15...sn_cli-v0.79.16) - 2023-07-12 - -### Other -- client to upload paid chunks in batches -- chunk files only once when making payment for their storage - -## [0.79.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.14...sn_cli-v0.79.15) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.13...sn_cli-v0.79.14) - 2023-07-11 - -### Fixed -- *(client)* publish register on creation - -## [0.79.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.12...sn_cli-v0.79.13) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.11...sn_cli-v0.79.12) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.10...sn_cli-v0.79.11) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.9...sn_cli-v0.79.10) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.8...sn_cli-v0.79.9) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.7...sn_cli-v0.79.8) - 2023-07-10 - -### Added -- faucet server and cli DBC read - -### Fixed -- use Deposit --stdin instead of Read in cli -- wallet store - -## [0.79.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.6...sn_cli-v0.79.7) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.5...sn_cli-v0.79.6) - 2023-07-07 - -### Other -- update dependencies 
- -## [0.79.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.4...sn_cli-v0.79.5) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.3...sn_cli-v0.79.4) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.2...sn_cli-v0.79.3) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.1...sn_cli-v0.79.2) - 2023-07-06 - -### Other -- update dependencies - -## [0.79.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.0...sn_cli-v0.79.1) - 2023-07-06 - -### Other -- update dependencies - -## [0.79.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.26...sn_cli-v0.79.0) - 2023-07-06 - -### Added -- introduce `--log-format` arguments -- provide `--log-output-dest` arg for `safe` -- provide `--log-output-dest` arg for `safenode` - -### Other -- use data-dir rather than root-dir -- incorporate various feedback items - -## [0.78.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.25...sn_cli-v0.78.26) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.24...sn_cli-v0.78.25) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.23...sn_cli-v0.78.24) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.22...sn_cli-v0.78.23) - 2023-07-04 - -### Other -- update dependencies - -## [0.78.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.21...sn_cli-v0.78.22) - 2023-07-03 - -### Other -- reduce SAMPLE_SIZE for the data_with_churn test - -## [0.78.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.20...sn_cli-v0.78.21) - 2023-06-29 - -### Other -- update dependencies - 
-## [0.78.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.19...sn_cli-v0.78.20) - 2023-06-29 - -### Other -- update dependencies - -## [0.78.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.18...sn_cli-v0.78.19) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.17...sn_cli-v0.78.18) - 2023-06-28 - -### Added -- register refactor, kad reg without cmds - -## [0.78.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.16...sn_cli-v0.78.17) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.15...sn_cli-v0.78.16) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.14...sn_cli-v0.78.15) - 2023-06-27 - -### Other -- update dependencies - -## [0.78.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.13...sn_cli-v0.78.14) - 2023-06-27 - -### Other -- update dependencies - -## [0.78.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.12...sn_cli-v0.78.13) - 2023-06-27 - -### Other -- benchmark client download - -## [0.78.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.11...sn_cli-v0.78.12) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.10...sn_cli-v0.78.11) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.9...sn_cli-v0.78.10) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.8...sn_cli-v0.78.9) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.7...sn_cli-v0.78.8) - 2023-06-26 - -### Other -- update dependencies - -## 
[0.78.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.6...sn_cli-v0.78.7) - 2023-06-24 - -### Other -- update dependencies - -## [0.78.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.5...sn_cli-v0.78.6) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.4...sn_cli-v0.78.5) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.3...sn_cli-v0.78.4) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.2...sn_cli-v0.78.3) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.1...sn_cli-v0.78.2) - 2023-06-22 - -### Other -- update dependencies - -## [0.78.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.0...sn_cli-v0.78.1) - 2023-06-22 - -### Other -- *(client)* initial refactor around uploads - -## [0.78.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.49...sn_cli-v0.78.0) - 2023-06-22 - -### Added -- use standarised directories for files/wallet commands - -## [0.77.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.48...sn_cli-v0.77.49) - 2023-06-21 - -### Other -- update dependencies - -## [0.77.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.47...sn_cli-v0.77.48) - 2023-06-21 - -### Other -- update dependencies - -## [0.77.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.46...sn_cli-v0.77.47) - 2023-06-21 - -### Other -- *(node)* obtain parent_tx from SignedSpend - -## [0.77.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.45...sn_cli-v0.77.46) - 2023-06-21 - -### Added -- provide option for log output in json - -## [0.77.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.44...sn_cli-v0.77.45) - 2023-06-20 - -### Other -- update 
dependencies - -## [0.77.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.43...sn_cli-v0.77.44) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.42...sn_cli-v0.77.43) - 2023-06-20 - -### Other -- include the Tx instead of output DBCs as part of storage payment proofs -- use a set to collect Chunks addrs for build payment proof - -## [0.77.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.41...sn_cli-v0.77.42) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.40...sn_cli-v0.77.41) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.39...sn_cli-v0.77.40) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.38...sn_cli-v0.77.39) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.37...sn_cli-v0.77.38) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.36...sn_cli-v0.77.37) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.35...sn_cli-v0.77.36) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.34...sn_cli-v0.77.35) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.33...sn_cli-v0.77.34) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.32...sn_cli-v0.77.33) - 2023-06-19 - -### Other -- update dependencies - -## 
[0.77.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.31...sn_cli-v0.77.32) - 2023-06-19 - -### Fixed -- *(safe)* check if upload path contains a file - -## [0.77.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.30...sn_cli-v0.77.31) - 2023-06-16 - -### Fixed -- CLI is missing local-discovery feature - -## [0.77.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.29...sn_cli-v0.77.30) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.28...sn_cli-v0.77.29) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.27...sn_cli-v0.77.28) - 2023-06-16 - -### Other -- improve memory benchmarks, remove broken download bench - -## [0.77.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.26...sn_cli-v0.77.27) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.25...sn_cli-v0.77.26) - 2023-06-16 - -### Fixed -- *(bin)* negate local-discovery check - -## [0.77.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.24...sn_cli-v0.77.25) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.23...sn_cli-v0.77.24) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.22...sn_cli-v0.77.23) - 2023-06-15 - -### Fixed -- parent spend issue - -## [0.77.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.21...sn_cli-v0.77.22) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.20...sn_cli-v0.77.21) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.19...sn_cli-v0.77.20) - 2023-06-15 - -### 
Other -- update dependencies - -## [0.77.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.18...sn_cli-v0.77.19) - 2023-06-15 - -### Other -- use throughput for benchmarking - -## [0.77.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.17...sn_cli-v0.77.18) - 2023-06-15 - -### Other -- add initial benchmarks for prs and chart generation - -## [0.77.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.16...sn_cli-v0.77.17) - 2023-06-14 - -### Added -- include output DBC within payment proof for Chunks storage - -## [0.77.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.15...sn_cli-v0.77.16) - 2023-06-14 - -### Other -- update dependencies - -## [0.77.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.14...sn_cli-v0.77.15) - 2023-06-14 - -### Other -- use clap env and parse multiaddr - -## [0.77.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.13...sn_cli-v0.77.14) - 2023-06-14 - -### Added -- *(client)* expose req/resp timeout to client cli - -### Other -- *(client)* parse duration in clap derivation - -## [0.77.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.12...sn_cli-v0.77.13) - 2023-06-13 - -### Other -- update dependencies - -## [0.77.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.11...sn_cli-v0.77.12) - 2023-06-13 - -### Other -- update dependencies - -## [0.77.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.10...sn_cli-v0.77.11) - 2023-06-12 - -### Other -- update dependencies - -## [0.77.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.9...sn_cli-v0.77.10) - 2023-06-12 - -### Other -- update dependencies - -## [0.77.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.8...sn_cli-v0.77.9) - 2023-06-09 - -### Other -- improve documentation for cli commands - -## [0.77.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.7...sn_cli-v0.77.8) - 2023-06-09 - -### Other -- 
manually change crate version - -## [0.77.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.6...sn_cli-v0.77.7) - 2023-06-09 - -### Other -- update dependencies - -## [0.77.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.5...sn_cli-v0.77.6) - 2023-06-09 - -### Other -- emit git info with vergen - -## [0.77.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.4...sn_cli-v0.77.5) - 2023-06-09 - -### Other -- update dependencies - -## [0.77.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.3...sn_cli-v0.77.4) - 2023-06-09 - -### Other -- provide clarity on command arguments - -## [0.77.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.2...sn_cli-v0.77.3) - 2023-06-08 - -### Other -- update dependencies - -## [0.77.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.1...sn_cli-v0.77.2) - 2023-06-08 - -### Other -- improve documentation for cli arguments - -## [0.77.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.0...sn_cli-v0.77.1) - 2023-06-07 - -### Added -- making the CLI --peer arg global so it can be passed in any order -- bail out if empty list of addreses is provided for payment proof generation -- *(client)* add progress indicator for initial network connections -- attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate payment the payment proof tree -- Chunk storage payment and building payment proofs - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- improve CLI --peer arg doc -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): 
sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- *(logs)* enable metrics feature by default -- small log wording updates -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml deleted file mode 100644 index 59686dd5dc..0000000000 --- a/sn_cli/Cargo.toml +++ /dev/null @@ -1,88 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "Safe Network CLI" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_cli" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.95.3" - -[[bin]] -path = "src/bin/main.rs" -name = "safe" - -[[bench]] -name = "files" -harness = false - -[features] -default = ["metrics"] -distribution = ["base64", "bitcoin"] -local-discovery = [ - "sn_client/local-discovery", - "sn_peers_acquisition/local-discovery", -] -metrics = ["sn_logging/process-metrics"] -network-contacts = ["sn_peers_acquisition/network-contacts"] -open-metrics = ["sn_client/open-metrics"] - -[dependencies] -aes = "0.7.5" -base64 = { version = "0.22.0", optional = true } -bitcoin = { version = "0.31.0", optional = true } -block-modes = "0.8.1" -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -custom_debug = "~0.6.1" -chrono = "~0.4.19" -clap = { version = "4.2.1", features = ["derive"] } -color-eyre = "~0.6" -dialoguer = "~0.11.0" -dirs-next = "~2.0.0" -futures = "~0.3.13" -hex = "~0.4.3" -indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.53", features = ["identify", "kad"] } -rand = "0.8.5" -rayon = "1.8.0" -reqwest = { version = 
"0.12.2", default-features = false, features = [ - "rustls-tls-manual-roots", -] } -rmp-serde = "1.1.1" -rpassword = "7.3.1" -serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -tempfile = "3.6.0" -tiny-keccak = "~2.0.2" -tokio = { version = "1.32.0", features = [ - "io-util", - "macros", - "parking_lot", - "rt", - "sync", - "time", - "fs", -] } -tracing = { version = "~0.1.26" } -url = "2.4.0" -walkdir = "~2.5.0" -xor_name = "5.0.0" - -[dev-dependencies] -eyre = "0.6.8" -criterion = "0.5.1" -tempfile = "3.6.0" -rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.110.4", features = [ - "test-utils", -] } - -[lints] -workspace = true diff --git a/sn_cli/README.md b/sn_cli/README.md deleted file mode 100644 index f1a2f29edf..0000000000 --- a/sn_cli/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# sn_cli - -This directory contains the `safe` client binary. It is used to interact with the Safe Network and provides a range of commands for managing data, keys, wallets, and more. - -The `safe` binary includes the following subcommands: - -- `wallet`: Commands for wallet management. This includes creating wallets, checking balances, and making transactions. -- `files`: Commands for file management. This includes uploading, downloading, and deleting files. -- `register`: Commands for register management. This includes creating, reading, and writing to registers. diff --git a/sn_cli/src/acc_packet.rs b/sn_cli/src/acc_packet.rs deleted file mode 100644 index a9430e3449..0000000000 --- a/sn_cli/src/acc_packet.rs +++ /dev/null @@ -1,1603 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod change_tracking; - -use change_tracking::*; - -use super::{ - files::{download_file, FilesUploader}, - ChunkManager, -}; - -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, - protocol::storage::{Chunk, RegisterAddress, RetryStrategy}, - registers::EntryHash, - transfers::{DerivationIndex, MainSecretKey}, - Client, FilesApi, FolderEntry, FoldersApi, Metadata, UploadCfg, WalletClient, -}; - -use bls::PublicKey; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use std::{ - collections::{ - btree_map::{Entry, OccupiedEntry}, - BTreeMap, - }, - ffi::OsString, - fs::{create_dir_all, remove_dir_all, remove_file, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tokio::task::JoinSet; -use tracing::trace; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// Derivation index used to obtain the account packet root folder xorname -// TODO: use eip2333 path for deriving keys -const ACC_PACKET_ADDR_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x0; 32]); - -/// Derivation index used to obtain the owner key of the account packet root folder. -/// The derived key pair is used to: -/// - Sign all data operations sent to the network. -/// - Set it as the owner of all Folders (Registers) created on the network. -/// - Encrypt all the Folders entries metadata chunks. 
-// TODO: use eip2333 path for deriving keys -const ACC_PACKET_OWNER_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x1; 32]); - -/// An `AccountPacket` object allows users to store and manage files, wallets, etc., with the ability -/// and tools necessary to keep an instance tracking a local storage path, as well as keeping it in sync -/// with its remote version stored on the network. -/// A `Client` and a the location for a funded local hot-wallet are required by this object in order to be able to connect -/// to the network, paying for data storage, and upload/retrieve information to/from the network. -/// -/// TODO: currently only files and folders are supported, wallets, keys, etc., to be added later. -/// -/// TODO: make use of eip2333 paths for deriving keys. Currently keys used for encrypting and signing -/// operations are derived from the root key provided using index derivation. -/// -/// The `AccountPacket` keeps a reference to the network address of the root Folder holding the user's -/// files/folder hierarchy. All tracking information is kept under the `.safe` directory on disk, whose -/// content is not uploaded to the network, but only kept locally in order to realise which files/dirs -/// the user has made changes on compared to their last version retrieved from the network. -/// -/// A subdirectory called `metadata` is kept under `.safe` directory with the following files: -/// - A file named `root_folder.addr` which contains the network address where the root Folder is stored, -/// which is the one holding the entire hierarchy of user's files/dirs to be kept in sync with local changes -/// made by the user. -/// - For each of the user's files/dirs, a serialised `MetadataTrackingInfo` instance is stored on using the -/// file/dir metadata chunk xorname as filename. 
The information stored in these files are used to realise -/// if changes were locally made by the user in comparison with the last version of such files/dirs retrieved -/// from the network. -/// Example of files generated within an account-packet to keep track of changes makde to user's files/dirs: -/// -/// ./my-acc-packet -/// ├── my_dir_1 -/// ├── my_file.txt -/// ├── my_dir_2 -/// │ ├── other_dir -/// │ └── my_other_file.txt -/// └── .safe -/// ├── chunk_artifacts -/// │ ├── ... -/// │ ... -/// ├── metadata -/// │ ├── 082cc90c900fa08d36067246a1e6136a828f1aae4926268c4349c200d56e34b9 -/// │ ├── 102c5536a10682bc3cdd4a1915fe2ad5e839cb94d0d3f124d0c18aee1d49ce50 -/// │ ├── 31824937c47a979df64af591f2e43f76190e65af835c4b338cbe7a7ba3f7d3cb -/// │ ├── 36778e471083140bc111677e2a86e49f4c0c20bc14ff2ad610e22615b72260b8 -/// │ ├── 3edd953cc320449e09b69b7b1b909a53874ee477f602f1a807dfd8057378367e -/// │ └── root_folder.addr -/// └── uploaded_files -/// ├── ... -/// ... -/// -/// There are other files which are stored under `.safe/chunk_artifacts` and `.safe/uploaded_files` directories -/// which are managed by the `ChunkManager` in order to locally cache chunked files, and a list of files -/// already uploaded to the network, to prevent from chunking and/or uploading the same files again. For more -/// details about these files, please refer to the `ChunkManager` module. -pub struct AccountPacket { - client: Client, - wallet_dir: PathBuf, - files_dir: PathBuf, - meta_dir: PathBuf, - tracking_info_dir: PathBuf, - curr_tracking_info: BTreeMap, - root_folder_addr: RegisterAddress, - root_folder_created: bool, -} - -impl AccountPacket { - /// Initialise directory as a fresh new packet. - /// All keys used for encrypting the files/folders metadata chunks and signing - /// operations are derived from the root key provided using index derivation. - /// The root Folder address and owner are also derived from the root SK. 
- /// A password can be optionally provided to encrypt the root SK before storing it on disk. - pub fn init( - client: Client, - wallet_dir: &Path, - path: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, - ) -> Result { - let (_, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?; - - // If there is already some tracking info we bail out as this is meant ot be a fresh new packet. - if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - bail!( - "The local path {path:?} is already being tracked with Folder address: {}", - addr.to_hex() - ); - } - - let (client, root_folder_addr) = derive_keys_and_address(client, root_sk); - store_root_folder_tracking_info(&meta_dir, root_folder_addr, false)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - Self::from_path(client, wallet_dir, path, password) - } - - /// Create AccountPacket instance from a directory which has been already initialised. - pub fn from_path( - client: Client, - wallet_dir: &Path, - path: &Path, - password: Option<&[u8]>, - ) -> Result { - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?; - let root_sk = read_root_sk(&tracking_info_dir, password)?; - let (client, root_folder_addr) = derive_keys_and_address(client, &root_sk); - - // this will fail if the directory was not previously initialised with 'init'. 
- let curr_tracking_info = read_tracking_info_from_disk(&meta_dir)?; - let (read_folder_addr, root_folder_created) = read_root_folder_addr(&meta_dir) - .map_err(|_| eyre!("Root Folder address not found, make sure the directory {path:?} is initialised."))?; - if read_folder_addr != root_folder_addr { - bail!( - "The path is already tracking another Folder with address: {}", - read_folder_addr.to_hex() - ); - } - - Ok(Self { - client, - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info, - root_folder_addr, - root_folder_created, - }) - } - - /// Return the address of the root Folder - pub fn root_folder_addr(&self) -> RegisterAddress { - self.root_folder_addr - } - - /// Retrieve and store entire Folders hierarchy from the network, generating tracking info. - pub async fn retrieve_folders( - client: &Client, - wallet_dir: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, - download_path: &Path, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - create_dir_all(download_path)?; - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(download_path)?; - - let (client, root_folder_addr) = derive_keys_and_address(client.clone(), root_sk); - - if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - // bail out if there is already a root folder address different from the passed in - if addr == root_folder_addr { - bail!("The download path is already tracking that Folder, use 'sync' instead."); - } else { - bail!( - "The download path is already tracking another Folder with address: {}", - addr.to_hex() - ); - } - } else { - store_root_folder_tracking_info(&meta_dir, root_folder_addr, true)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - } - - let mut acc_packet = Self { - client: client.clone(), - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info: BTreeMap::default(), - root_folder_addr, - 
root_folder_created: true, - }; - - let folder_name: OsString = download_path.file_name().unwrap_or_default().into(); - let folders_api = - FoldersApi::retrieve(client.clone(), wallet_dir, root_folder_addr).await?; - let folders_to_download = vec![(folder_name, folders_api, download_path.to_path_buf())]; - - let _ = acc_packet - .download_folders_and_files(folders_to_download, batch_size, retry_strategy) - .await?; - - acc_packet.curr_tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - - Ok(acc_packet) - } - - /// Generate a report with differences found in local files/folders in comparison with their versions stored on the network. - pub fn status(&self) -> Result<()> { - println!("Looking for local changes made to files/folders compared to version on network at: {} ...", self.root_folder_addr().to_hex()); - let changes = self.scan_files_and_folders_for_changes(false)?; - - if changes.mutations.is_empty() { - println!("No local changes made to files/folders."); - } else { - println!("Local changes made to files/folders:"); - changes.mutations.iter().for_each(|m| println!("{m}")); - - let num_of_changes = changes.mutations.len(); - println!("\nChanges found to local files/folders: {num_of_changes}"); - } - Ok(()) - } - - /// Sync local changes made to files and folder with their version on the network, - /// both pushing and pulling changes to/form the network. 
- pub async fn sync(&mut self, upload_cfg: UploadCfg, make_data_public: bool) -> Result<()> { - let ChangesToApply { folders, mutations } = - self.scan_files_and_folders_for_changes(make_data_public)?; - - if mutations.is_empty() { - println!("No local changes made to files/folders to be pushed to network."); - } else { - println!("Local changes made to files/folders to be synced with network:"); - mutations.iter().for_each(|m| println!("{m}")); - } - - println!("Paying for folders hierarchy and uploading..."); - let synced_folders = self - .pay_and_sync_folders(folders, upload_cfg, make_data_public) - .await?; - - // mark root folder as created if it wasn't already - if !self.root_folder_created { - self.root_folder_created = true; - store_root_folder_tracking_info( - &self.meta_dir, - self.root_folder_addr, - self.root_folder_created, - )?; - } - - // update tracking information based on mutations detected locally - for mutation in mutations { - match mutation { - Mutation::NewFile(tracking_info) | Mutation::NewFolder(tracking_info) => { - self.store_tracking_info(tracking_info)?; - } - Mutation::FileRemoved((_, meta_xorname)) - | Mutation::FolderRemoved((_, meta_xorname)) => { - self.remove_tracking_info(meta_xorname); - } - Mutation::FileContentChanged((meta_xorname, tracking_info)) => { - self.store_tracking_info(tracking_info)?; - self.remove_tracking_info(meta_xorname); - } - } - } - - // download files/folders which are new in the synced folders - let folders_to_download: Vec<_> = synced_folders - .iter() - .map(|(path, (folders_api, _))| { - let folder_name: OsString = path.file_name().unwrap_or_default().into(); - (folder_name, folders_api.clone(), path.clone()) - }) - .collect(); - let mut updated_folders = self - .download_folders_and_files( - folders_to_download, - upload_cfg.batch_size, - upload_cfg.retry_strategy, - ) - .await?; - - // Now let's check if any file/folder was removed remotely so we remove them locally from disk. 
- // We do it in two phases, first we get rid of all dirs that were removed, then we go through - // the files, this is to make sure we remove files which belong to nested folders being removed. - let mut curr_tracking_info = read_tracking_info_from_disk(&self.meta_dir)?; - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::Folder(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::File(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - - self.curr_tracking_info = curr_tracking_info; - - Ok(()) - } - - // Private helpers - - // Generate the path relative to the user's root folder - fn get_relative_path(&self, path: &Path) -> Result { - let relative_path = path - .to_path_buf() - .canonicalize()? - .strip_prefix(&self.files_dir)? - .to_path_buf(); - Ok(relative_path) - } - - // Store tracking info in a file to keep track of any changes made to the source file/folder - fn store_tracking_info( - &self, - MetadataTrackingInfo { - file_path, - meta_xorname, - metadata, - entry_hash, - }: MetadataTrackingInfo, - ) -> Result<()> { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - let mut meta_file = File::create(metadata_file_path)?; - - let tracking_info = MetadataTrackingInfo { - // we store the relative path so the root folder can be moved to - // different locations/paths if desired by the user. 
- file_path: self.get_relative_path(&file_path)?, - meta_xorname, - metadata, - entry_hash, - }; - - meta_file.write_all(&rmp_serde::to_vec(&tracking_info)?)?; - - Ok(()) - } - - // Remove tracking information file for given xorname - fn remove_tracking_info(&self, meta_xorname: XorName) { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - if let Err(err) = remove_file(&metadata_file_path) { - println!("Failed to remove tracking info file {metadata_file_path:?}: {err}"); - } - } - - // If the file/folder referenced by the tracking info provided is not part of the passed Folders - // hierarchy, remove it from local disk along with its tracking information. - // Returns whether the file/folder was removed. - fn remove_tracking_if_not_found_in_folders( - &self, - tracking_info: &MetadataTrackingInfo, - folders: &mut Folders, - ) -> bool { - let mut removed = false; - let abs_path = self.files_dir.join(&tracking_info.file_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_dir_all(&abs_path) { - trace!("Failed to remove directory {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - folders.remove(&abs_path); - removed = true; - } - } - } - FolderEntry::File(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_file(&abs_path) { - // this is expected if parent folder was just removed as part of this syncing flow. 
- trace!("Failed to remove file {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - } - } - - removed - } - - // Scan existing files and folders on disk, generating a report of all the detected - // changes based on the tracking info kept locally. - // If make_data_public is false the metadata chunks are encrypted. - fn scan_files_and_folders_for_changes(&self, make_data_public: bool) -> Result { - // we don't use the local cache in order to realise of any changes made to files content. - let mut chunk_manager = ChunkManager::new(&self.tracking_info_dir); - chunk_manager.chunk_with_iter(self.iter_only_files(), false, false)?; - - let encryption_pk = if make_data_public { - None - } else { - // we pass down the key to encrypt the metadata chunk of any new content detected. - Some(self.client.signer_pk()) - }; - - let mut changes = self.read_folders_hierarchy_from_disk(encryption_pk)?; - - // add chunked files to the corresponding Folders - let folders = &mut changes.folders; - for chunked_file in chunk_manager.iter_chunked_files() { - let file_path = &chunked_file.file_path; - if let Some(Entry::Occupied(mut parent_folder)) = file_path - .parent() - .map(|parent| folders.entry(parent.to_path_buf())) - { - // try to find the tracking info of the file/folder by its name - match self.get_tracking_info(file_path) { - Ok(Some(tracking_info)) => match &tracking_info.metadata.content { - FolderEntry::File(chunk) => { - if chunk.address() != &chunked_file.head_chunk_address { - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - - changes.mutations.push(Mutation::FileContentChanged(( - tracking_info.meta_xorname, - MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - }, - ))); - } - } - 
FolderEntry::Folder(_) => { - // New file found where there used to be a folder - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - }, - Ok(None) => { - let (entry_hash, meta_xorname, metadata) = - parent_folder.get_mut().0.add_file( - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - parent_folder.get_mut().1.has_new_entries(); - - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - Err(err) => { - println!("Skipping file {file_path:?}: {err:?}"); - } - } - } - } - - // now let's check if any file/folder was removed from disk - for (item_path, tracking_info) in self.curr_tracking_info.iter() { - let abs_path = self.files_dir.join(item_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - if !folders.contains_key(&abs_path) { - remove_from_parent(folders, &abs_path, tracking_info.entry_hash)?; - changes.mutations.push(Mutation::FolderRemoved(( - abs_path, - tracking_info.meta_xorname, - ))); - } - } - FolderEntry::File(_) => { - if chunk_manager - .iter_chunked_files() - .all(|chunked_file| chunked_file.file_path != abs_path) - { - remove_from_parent(folders, &abs_path, tracking_info.entry_hash)?; - changes.mutations.push(Mutation::FileRemoved(( - abs_path, - tracking_info.meta_xorname, - ))); - } - } - } - } - - Ok(changes) - } - - // Build Folders hierarchy from the set files dir. The metadata chunk of every new folder - // will be encrpyted if an encrpytion key has been provided. 
- fn read_folders_hierarchy_from_disk( - &self, - encryption_pk: Option, - ) -> Result { - let mut changes = ChangesToApply::default(); - for (dir_path, depth, parent, dir_name) in self.iter_only_dirs().filter_map(|entry| { - entry.path().parent().map(|parent| { - ( - entry.path().to_path_buf(), - entry.depth(), - parent.to_owned(), - entry.file_name().to_owned(), - ) - }) - }) { - let (folder, folder_change) = changes - .folders - .entry(dir_path.clone()) - .or_insert(self.find_folder_in_tracking_info(&dir_path)?) - .clone(); - let curr_folder_addr = *folder.address(); - - if depth > 0 { - let (parent_folder, parent_folder_change) = changes - .folders - .entry(parent.clone()) - .or_insert(self.find_folder_in_tracking_info(&parent)?); - - if folder_change.is_new_folder() { - let (entry_hash, meta_xorname, metadata) = - parent_folder.add_folder(dir_name, curr_folder_addr, encryption_pk)?; - parent_folder_change.has_new_entries(); - - changes - .mutations - .push(Mutation::NewFolder(MetadataTrackingInfo { - file_path: dir_path, - meta_xorname, - metadata, - entry_hash, - })); - } - } - } - - Ok(changes) - } - - // Read local tracking info for given file/folder item - fn get_tracking_info(&self, path: &Path) -> Result> { - let path = self.get_relative_path(path)?; - Ok(self.curr_tracking_info.get(&path)) - } - - // Instantiate a FolderApi based on local tracking info for given folder item - fn find_folder_in_tracking_info(&self, path: &Path) -> Result<(FoldersApi, FolderChange)> { - let mut folder_change = FolderChange::NewFolder; - let address = if path == self.files_dir { - if self.root_folder_created { - folder_change = FolderChange::NoChange; - } - Some(self.root_folder_addr) - } else { - self.get_tracking_info(path)?.and_then(|tracking_info| { - match tracking_info.metadata.content { - FolderEntry::Folder(addr) => { - folder_change = FolderChange::NoChange; - Some(addr) - } - FolderEntry::File(_) => None, - } - }) - }; - - let folders_api = 
FoldersApi::new(self.client.clone(), &self.wallet_dir, address)?; - Ok((folders_api, folder_change)) - } - - // Creates an iterator over the user's dirs names, excluding the '.safe' tracking dir - fn iter_only_dirs(&self) -> impl Iterator { - WalkDir::new(&self.files_dir) - .into_iter() - .filter_entry(|e| e.file_type().is_dir() && e.file_name() != SAFE_TRACKING_CHANGES_DIR) - .flatten() - } - - // Creates an iterator over the user's file, excluding the tracking files under '.safe' dir - fn iter_only_files(&self) -> impl Iterator { - WalkDir::new(&self.files_dir) - .into_iter() - .filter_entry(|e| e.file_type().is_file() || e.file_name() != SAFE_TRACKING_CHANGES_DIR) - .flatten() - .filter(|e| e.file_type().is_file()) - } - - // Pay and upload all the files and folder. - async fn pay_and_sync_folders( - &self, - folders: Folders, - upload_cfg: UploadCfg, - make_data_public: bool, - ) -> Result { - let files_uploader = FilesUploader::new(self.client.clone(), self.wallet_dir.clone()) - .set_upload_cfg(upload_cfg) - .set_make_data_public(make_data_public) - .insert_entries(self.iter_only_files()); - let _summary = files_uploader.start_upload().await?; - - // Let's make the storage payment for Folders - let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(self.client.clone(), wallet); - let mut net_addresses = vec![]; - let mut new_folders = 0; - // let's collect list of addresses we need to pay for - folders.iter().for_each(|(_, (folder, folder_change))| { - if folder_change.is_new_folder() { - net_addresses.push(folder.as_net_addr()); - new_folders += 1; - } - net_addresses.extend(folder.meta_addrs_to_pay()); - }); - - let payment_result = wallet_client - .pay_for_storage(net_addresses.into_iter()) - .await?; - match payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - { - Some(cost) => { - let balance = wallet_client.balance(); - println!("Made payment of {cost} for 
{new_folders} Folders. New balance: {balance}",) - } - None => bail!("Failed to calculate total payment cost"), - } - - // Sync Folders concurrently now that payments have been made. - let mut tasks = JoinSet::new(); - for (path, (mut folder, folder_change)) in folders { - let op = if folder_change.is_new_folder() { - "Creation" - } else { - "Syncing" - }; - - tasks.spawn(async move { - match folder.sync(upload_cfg).await { - Ok(()) => { - println!( - "{op} of Folder (for {path:?}) succeeded. Address: {}", - folder.address().to_hex() - ); - } - Err(err) => { - println!("{op} of Folder (for {path:?}) failed: {err}") - } - } - (path, folder, folder_change) - }); - } - - let mut synced_folders = Folders::new(); - while let Some(res) = tasks.join_next().await { - match res { - Ok((path, folder, c)) => { - synced_folders.insert(path, (folder, c)); - } - Err(err) => { - println!("Failed to sync/create a Folder with/on the network: {err:?}"); - } - } - } - - Ok(synced_folders) - } - - // Download a Folders and their files from the network and generate tracking info - async fn download_folders_and_files( - &self, - mut folders_to_download: Vec<(OsString, FoldersApi, PathBuf)>, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - let mut files_to_download = vec![]; - let mut updated_folders = Folders::new(); - while let Some((name, mut folders_api, target_path)) = folders_to_download.pop() { - if updated_folders.contains_key(&target_path) { - // we've already downloaded this Folder - continue; - } - - println!( - "Downloading Folder {name:?} from {}", - folders_api.address().to_hex() - ); - self.download_folder_from_network( - &target_path, - &mut folders_api, - &mut files_to_download, - &mut folders_to_download, - ) - .await?; - updated_folders.insert(target_path, (folders_api, FolderChange::NoChange)); - } - - let files_api: FilesApi = FilesApi::new(self.client.clone(), self.files_dir.clone()); - for (file_name, data_map_chunk, path) in 
files_to_download { - download_file( - files_api.clone(), - *data_map_chunk.name(), - (file_name, Some(data_map_chunk)), - &path, - false, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(updated_folders) - } - - // Download a Folder from the network and generate tracking info - async fn download_folder_from_network( - &self, - target_path: &Path, - folders_api: &mut FoldersApi, - files_to_download: &mut Vec<(OsString, Chunk, PathBuf)>, - folders_to_download: &mut Vec<(OsString, FoldersApi, PathBuf)>, - ) -> Result<()> { - for (entry_hash, (meta_xorname, metadata)) in folders_api.entries().await?.into_iter() { - let name = metadata.name.clone(); - let item_path = target_path.join(name.clone()); - if let Ok(Some(tracking_info)) = self.get_tracking_info(&item_path) { - if tracking_info.meta_xorname == meta_xorname { - // thus we already have this same file/folder locally - continue; - } - } - - match &metadata.content { - FolderEntry::File(data_map_chunk) => { - files_to_download.push(( - name.clone().into(), - data_map_chunk.clone(), - target_path.to_path_buf(), - )); - let _ = File::create(&item_path)?; - } - FolderEntry::Folder(subfolder_addr) => { - let folders_api = FoldersApi::retrieve( - self.client.clone(), - &self.wallet_dir, - *subfolder_addr, - ) - .await?; - - folders_to_download.push((name.clone().into(), folders_api, item_path.clone())); - create_dir_all(&item_path)?; - } - }; - - self.store_tracking_info(MetadataTrackingInfo { - file_path: item_path, - meta_xorname, - metadata, - entry_hash, - })?; - } - - Ok(()) - } -} - -// Given an absolute path, find the Folder containing such item, and remove it from its entries. 
-fn remove_from_parent(folders: &mut Folders, path: &Path, entry_hash: EntryHash) -> Result<()> { - if let Some((parent_folder, folder_change)) = path.parent().and_then(|p| folders.get_mut(p)) { - folder_change.has_new_entries(); - parent_folder.remove_item(entry_hash)?; - } - Ok(()) -} - -// Replace a file/folder item from a given Folder (passed in as a container's OccupiedEntry'). -// The metadata chunk of the new item (folder/file) will be encrpyted if a key has been provided. -fn replace_item_in_folder( - folder: &mut OccupiedEntry<'_, PathBuf, (FoldersApi, FolderChange)>, - entry_hash: EntryHash, - file_name: OsString, - data_map: Chunk, - encryption_pk: Option, -) -> Result<(EntryHash, XorName, Metadata)> { - let (ref mut folders_api, ref mut folder_change) = folder.get_mut(); - folder_change.has_new_entries(); - let res = folders_api.replace_file( - entry_hash, - file_name.clone(), - data_map.clone(), - encryption_pk, - )?; - Ok(res) -} - -// Search for a file/folder item in its parent Folder by its name, returning its metadata chunk xorname. -fn find_by_name_in_parent_folder(name: &str, path: &Path, folders: &Folders) -> Option { - path.parent() - .and_then(|parent| folders.get(parent)) - .and_then(|(folder, _)| folder.find_by_name(name)) - .map(|(meta_xorname, _)| *meta_xorname) -} - -// Using the provided root SK, derive client signer SK and the root Folder address from it. -// It returns the Client updated with the derived signing key set, along with the derived Register address. -// TODO: use eip2333 path for deriving keys and address. -fn derive_keys_and_address( - mut client: Client, - root_sk: &MainSecretKey, -) -> (Client, RegisterAddress) { - // Set the client signer SK as a derived key from the root key. This will - // be used for signing operations and also for encrypting metadata chunks. 
- let signer_sk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key(); - client.set_signer_key(signer_sk); - - // Derive a key from the root key to generate the root Folder xorname, and use - // the client signer's corresponding PK as the owner of it. - let derived_pk = root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key(); - let root_folder_addr = RegisterAddress::new( - XorName::from_content(&derived_pk.to_bytes()), - client.signer_pk(), - ); - - (client, root_folder_addr) -} - -#[cfg(test)] -mod tests { - // All tests require a network running so Clients can be instantiated. - - use crate::acc_packet::{ - derive_keys_and_address, RECOVERY_SEED_FILENAME, SAFE_TRACKING_CHANGES_DIR, - }; - - use super::{ - read_root_folder_addr, read_tracking_info_from_disk, AccountPacket, Metadata, - MetadataTrackingInfo, Mutation, ACC_PACKET_ADDR_DERIVATION_INDEX, - ACC_PACKET_OWNER_DERIVATION_INDEX, - }; - use rand::{thread_rng, Rng}; - use sn_client::{ - protocol::storage::{Chunk, RetryStrategy}, - registers::{EntryHash, RegisterAddress}, - test_utils::{get_funded_wallet, get_new_client, random_file_chunk}, - transfers::MainSecretKey, - FolderEntry, UploadCfg, BATCH_SIZE, - }; - - use bls::SecretKey; - use bytes::Bytes; - use eyre::{bail, eyre, Result}; - use std::{ - collections::{BTreeMap, BTreeSet}, - fs::{create_dir_all, remove_dir_all, remove_file, File, OpenOptions}, - io::{Read, Write}, - path::{Path, PathBuf}, - }; - use xor_name::XorName; - - const SYNC_OPTS: (UploadCfg, bool) = { - let cfg = UploadCfg { - verify_store: true, - batch_size: BATCH_SIZE, - retry_strategy: RetryStrategy::Quick, - show_holders: false, - max_repayments_for_failed_data: 1, - collect_registers: false, - }; - let make_data_public = false; - (cfg, make_data_public) - }; - - #[tokio::test] - async fn test_acc_packet_private_helpers() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = 
MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let files_path = tmp_dir.path().join("myfiles"); - create_dir_all(&files_path)?; - - let owner_pk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key() - .public_key(); - let xorname = XorName::from_content( - &root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key() - .to_bytes(), - ); - let expected_folder_addr = RegisterAddress::new(xorname, owner_pk); - - let acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - assert_eq!( - derive_keys_and_address(client, &root_sk).1, - expected_folder_addr - ); - assert_eq!(acc_packet.root_folder_addr(), expected_folder_addr); - - let mut test_files = create_test_files_on_disk(&files_path)?; - let mut rng = rand::thread_rng(); - let dummy_metadata = Metadata { - name: "dummy".to_string(), - content: FolderEntry::File(Chunk::new(Bytes::new())), - }; - for (relative_path, _) in test_files.iter() { - let abs_path = files_path.join(relative_path); - - // test helper which calculates relative paths based on root files dir of acc packet - assert!( - matches!(acc_packet.get_relative_path(&abs_path), Ok(p) if &p == relative_path), - "AccountPacket::get_relative_path helper returned invalid path" - ); - - // let's test helper to store tracking info - // use just dummy/invalid metadata and meta-xorname since we won't verify it - let meta_xorname = XorName::random(&mut rng); - acc_packet.store_tracking_info(MetadataTrackingInfo { - file_path: abs_path, - meta_xorname, - metadata: dummy_metadata.clone(), - entry_hash: EntryHash::default(), - })?; - assert!(acc_packet.meta_dir.join(hex::encode(meta_xorname)).exists()); - } - - // let's test helpers to read and remove tracking info - let tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - assert_eq!(tracking_info.len(), test_files.len()); - for (abs_path, info) in 
tracking_info.iter() { - assert!(test_files.remove(abs_path).is_some()); - acc_packet.remove_tracking_info(info.meta_xorname); - assert!(!acc_packet - .meta_dir - .join(hex::encode(info.meta_xorname)) - .exists()); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_from_empty_dir() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacketempty"); - create_dir_all(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - // let's sync up with the network from the original empty account packet - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpacketempty-clone"); - let cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's verify both the original and cloned packets are empty - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_upload_download() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket"); - let expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let 
download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - - let downloaded_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - check_files_and_dirs_match(&acc_packet, &downloaded_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &downloaded_acc_packet, expected_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_scan_files_and_folders_changes() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-to-scan"); - let mut test_files = create_test_files_on_disk(&files_path)?; - let files_path = files_path.canonicalize()?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify changes detected - assert_eq!(changes.mutations.len(), 4); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir1")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir2")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make some mutations/changes - mutate_test_files_on_disk(&files_path, &mut test_files)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify new changes detected - 
assert_eq!(changes.mutations.len(), 8); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::FileRemoved((p, _)) if p == &files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::FolderRemoved((p,_)) if p == &files_path.join("dir2")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3").join("dir3_1")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("dir3").join("dir3_1").join("file3.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4").join("dir4_1")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - Ok(()) - } - - #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] - #[tokio::test] - async fn test_acc_packet_sync_mutations() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpackettosync"); - let mut expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpackettosync-clone"); - let mut cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's make mutations to the clone: - 
mutate_test_files_on_disk(&clone_files_path, &mut expected_files)?; - - // and finally, sync the clone up with the network - cloned_acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's sync up with the network from the original account packet to merge - // changes made earlier from the cloned version - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's verify both the original and cloned packets contain the same content - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, expected_files)?; - - Ok(()) - } - - // Acc-packets can be moved to different locations on local disk without affecting their tracking info. - // We disable this test for Windows since in CI the use of std::fs::rename gives a permissions issue. - #[cfg(any(target_os = "linux", target_os = "linux"))] - #[tokio::test] - async fn test_acc_packet_moved_folder() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket-to-move"); - let mut test_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make just one mutation before moving the dir to another disk location - let new_chunk = random_file_chunk(); - let file2modify = Path::new("dir1").join("file1.txt"); - OpenOptions::new() - .write(true) - .open(src_files_path.join(&file2modify))? 
- .write_all(new_chunk.value())?; - test_files.insert(file2modify, Some(new_chunk)); - - // let's now move it to another disk location - let moved_files_path = tmp_dir.path().join("myaccpacket-moved"); - create_dir_all(&moved_files_path)?; - std::fs::rename(src_files_path, &moved_files_path)?; - let moved_files_path = moved_files_path.canonicalize()?; - - let moved_acc_packet = - AccountPacket::from_path(client.clone(), wallet_dir, &moved_files_path, None)?; - - // verify only one change is detected still after moved to another location on disk - let changes = moved_acc_packet.scan_files_and_folders_for_changes(false)?; - assert_eq!(changes.mutations.len(), 1); - assert_eq!(changes.mutations.first().map(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == moved_files_path.join("dir1").join("file1.txt")) - }), Some(true)); - - check_tracking_info_match(&moved_acc_packet, &moved_acc_packet, test_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_derived_address() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-unencrypted-metadata"); - let _ = create_test_files_on_disk(&files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // try to download Folder with a different root SK should fail since it - // will derive a different addresse than the one used for creating it - let download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - let other_root_sk = MainSecretKey::random(); - - if AccountPacket::retrieve_folders( - &client, - wallet_dir, - &other_root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - 
.await - .is_ok() - { - bail!("acc-packet retrieval succeeded unexpectedly"); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_recovery_seed_encryption() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - - // let's first test with unencrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_unencrypted_seed"); - create_dir_all(&src_files_path)?; - let _ = AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - let _ = AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None)?; - - let bytes = std::fs::read( - src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert_eq!(bytes, root_sk.to_bytes()); - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(b"123456789"), - ) - .is_ok() - { - bail!("acc-packet loading with a password succeeded unexpectedly"); - } - - // let's now test with encrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_encrypted_seed"); - create_dir_all(&src_files_path)?; - let mut rng = thread_rng(); - let password: [u8; 32] = rng.gen(); - let incorrect_password: [u8; 32] = rng.gen(); - - let _ = AccountPacket::init( - client.clone(), - wallet_dir, - &src_files_path, - &root_sk, - Some(&password), - )?; - - if AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None).is_ok() { - bail!("acc-packet loading without a password succeeded unexpectedly"); - } - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(&incorrect_password), - ) - .is_ok() - { - bail!("acc-packet loading with incorrect password succeeded unexpectedly"); - } - - let _ = - AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, Some(&password))?; - - let bytes = std::fs::read( - 
src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert!(!bytes.is_empty()); - assert_ne!(bytes, root_sk.to_bytes()); - - Ok(()) - } - - // Helpers functions to generate and verify test data - - // Create a hard-coded set of test files and dirs on disk - fn create_test_files_on_disk(base_path: &Path) -> Result>> { - // let's create a hierarchy with dirs and files with random content - let mut files = BTreeMap::new(); - files.insert( - Path::new("file0.txt").to_path_buf(), - Some(random_file_chunk()), - ); - files.insert( - Path::new("dir1").join("file1.txt"), - Some(random_file_chunk()), - ); - files.insert(Path::new("dir2").to_path_buf(), None); - - for (path, chunk) in files.iter() { - let full_path = base_path.join(path); - if let Some(chunk) = chunk { - // it's a file, thus we create it and store its chunk bytes - create_dir_all(full_path.parent().expect("invalid path for test file"))?; - let mut file = File::create(full_path)?; - file.write_all(chunk.value())?; - } else { - // it's a dir, and it shall be empty - create_dir_all(full_path)?; - } - } - Ok(files) - } - - // Apply a hard-coded set of mutations to test files and dirs on disk - fn mutate_test_files_on_disk( - path: &Path, - test_files: &mut BTreeMap>, - ) -> Result<()> { - // - modify the content of a file - let new_chunk = random_file_chunk(); - let file2modify = Path::new("file0.txt"); - OpenOptions::new() - .write(true) - .open(path.join(file2modify))? 
- .write_all(new_chunk.value())?; - test_files.insert(file2modify.to_path_buf(), Some(new_chunk)); - // - remove one of the files - let file2remove = Path::new("dir1").join("file1.txt"); - remove_file(path.join(&file2remove))?; - test_files.remove(&file2remove); - // we need to keep the empty dir within the list of expected files though - test_files.insert(Path::new("dir1").to_path_buf(), None); - // - remove one of the dirs - let dir2remove = Path::new("dir2"); - remove_dir_all(path.join(dir2remove))?; - test_files.remove(dir2remove); - // - create new file within subdirs - create_dir_all(path.join("dir3").join("dir3_1"))?; - let file2create = Path::new("dir3").join("dir3_1").join("file3.txt"); - let mut file = File::create(path.join(&file2create))?; - let new_chunk = random_file_chunk(); - file.write_all(new_chunk.value())?; - test_files.insert(file2create, Some(new_chunk)); - // - create new subdirs - let dir2create = Path::new("dir4").join("dir4_1"); - create_dir_all(path.join(&dir2create))?; - test_files.insert(dir2create.to_path_buf(), None); - - Ok(()) - } - - // Helper to check if a dir is empty - fn is_empty_dir(path: &Path) -> bool { - path.read_dir() - .map(|mut i| i.next().is_none()) - .unwrap_or(false) - } - - // Collect list of files and empty dirs, to be used for comparing in tests - fn list_of_files_and_empty_dirs(acc_packet: &AccountPacket) -> BTreeSet { - acc_packet - .iter_only_files() - .chain(acc_packet.iter_only_dirs()) - .flat_map(|file_entry| { - let path = file_entry.path(); - if path.is_dir() && !is_empty_dir(path) { - bail!("we skip non empty dirs"); - } - - acc_packet.get_relative_path(path) - }) - .collect() - } - - // Check both acc packets kept the same set of tracking information locally - fn check_tracking_info_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - let root_addr = src_packet.root_folder_addr(); - assert_eq!( - 
read_root_folder_addr(&src_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in source directory tracking info." - ); - assert_eq!( - read_root_folder_addr(&target_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in target directory tracking info." - ); - - let src_tracking_info = read_tracking_info_from_disk(&src_packet.meta_dir)?; - let mut target_tracking_info = read_tracking_info_from_disk(&target_packet.meta_dir)?; - - for (path, src_tracking_info) in src_tracking_info { - match target_tracking_info.remove(&path) { - None => { - bail!("Tracking info found in source is missing in target directory for file/dir: {path:?}") - } - Some(info) => { - if info != src_tracking_info { - bail!("Different tracking info kept in source and target for file/dir: {path:?}"); - } - } - } - - let abs_path = src_packet.files_dir.join(&path); - if abs_path.is_dir() { - assert_eq!(src_tracking_info.file_path, path, - "Incorrect path in tracking info found in source and target directories for dir: {path:?}"); - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Incorrect tracking info found in source and target directories for dir: {path:?}"); - // if it's an empty dir we shall find it in the list of expected files - if is_empty_dir(&abs_path) { - let _ = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for dir: {path:?}" - ) - })?; - } - } else { - let chunk = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for file: {path:?}" - ) - })?; - - if chunk.is_some() { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::File(_)), - "Tracking info found in source and target directories don't match the file: {path:?}"); - } else { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Tracking info found in source and 
target directories don't match the dir: {path:?}"); - } - } - } - - if !target_tracking_info.is_empty() { - bail!("Tracking info found in target directory but missing in source directory: {target_tracking_info:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s are lacking their tracking info in source or target directories: {expected_files:?}"); - } - - Ok(()) - } - - // Check both dirs have the same set of files and folders and no more - fn check_files_and_dirs_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - // let's collect all paths in target acc packet, i.e. files and empty dirs paths - let mut target_packet_files: BTreeSet = - list_of_files_and_empty_dirs(target_packet); - - // let's now compare those paths in target acc packet with those in source acc packet - for relative_path in list_of_files_and_empty_dirs(src_packet) { - if !target_packet_files.remove(&relative_path) { - bail!("File/dir found in source is missing in target directory: {relative_path:?}"); - } - - let src_path = src_packet.files_dir.join(&relative_path); - let target_path = target_packet.files_dir.join(&relative_path); - - let chunk = expected_files.remove(&relative_path).ok_or_else(|| { - eyre!("Unexpected file/dir found on source and target directories: {src_path:?}") - })?; - - if let Some(chunk) = chunk { - // it's a file, let's compare their content - let mut src_file = File::open(&src_path) - .map_err(|err| eyre!("couldn't open source file {src_path:?}: {err:?}"))?; - let mut target_file = File::open(&target_path) - .map_err(|err| eyre!("couldn't open target file {target_path:?}: {err:?}"))?; - - let mut src_content = Vec::new(); - src_file - .read_to_end(&mut src_content) - .expect("couldn't read source file"); - let mut target_content = Vec::new(); - target_file - .read_to_end(&mut target_content) - .expect("couldn't read target file"); - - assert_eq!( - src_content, - 
chunk.value().slice(..), - "source file content doesn't match with expected" - ); - assert_eq!( - target_content, - chunk.value().slice(..), - "target file content doesn't match with expected" - ); - } else { - // it's a dir, let's check they exist as dirs - assert!(src_path.is_dir(), "source path is not a dir {src_path:?}"); - assert!( - target_path.is_dir(), - "target path is not a dir {target_path:?}" - ); - } - } - - if !target_packet_files.is_empty() { - bail!("File/dir/s found in target directory but missing in source directory: {target_packet_files:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s were not found in source or target directories: {expected_files:?}"); - } - - Ok(()) - } -} diff --git a/sn_cli/src/acc_packet/change_tracking.rs b/sn_cli/src/acc_packet/change_tracking.rs deleted file mode 100644 index a2eba85270..0000000000 --- a/sn_cli/src/acc_packet/change_tracking.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use sn_client::{ - protocol::storage::RegisterAddress, registers::EntryHash, transfers::MainSecretKey, FoldersApi, - Metadata, -}; - -use aes::Aes256; -use block_modes::{block_padding::Pkcs7, BlockMode, Cbc}; -use bls::{SecretKey, SK_SIZE}; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tiny_keccak::{Hasher, Sha3}; -use walkdir::WalkDir; -use xor_name::XorName; - -// AES used to encrypt/decrypt the cached recovery seed. -type Aes256Cbc = Cbc; - -// AES Initialisation Vector length used. -const IV_LENGTH: usize = 16; - -// Length of buffers used for AES encryption/decryption. -const AES_BUFFER_LENGTH: usize = 48; - -// Name of hidden folder where tracking information and metadata is locally stored. -pub(super) const SAFE_TRACKING_CHANGES_DIR: &str = ".safe"; - -// Subfolder where files metadata will be cached -pub(super) const METADATA_CACHE_DIR: &str = "metadata"; - -// Name of the file where metadata about root folder is locally cached. -pub(super) const ROOT_FOLDER_METADATA_FILENAME: &str = "root_folder.addr"; - -// Name of the file where the recovery secret/seed is locally cached. -pub(crate) const RECOVERY_SEED_FILENAME: &str = "recovery_seed"; - -// Container to keep track in memory what changes are detected in local Folders hierarchy and files. -pub(super) type Folders = BTreeMap; - -// Type of local changes detected to a Folder -#[derive(Clone, Debug, PartialEq)] -pub(super) enum FolderChange { - NoChange, - NewFolder, - NewEntries, -} - -impl FolderChange { - /// Returns true if it's currently set to NewFolder. - pub fn is_new_folder(&self) -> bool { - self == &Self::NewFolder - } - - /// If it's currently set to NoChange then switch it to NewEntries. - /// Otherwise we don't need to change it as the entire Folder will need to be uploaded. 
- pub fn has_new_entries(&mut self) { - if self == &Self::NoChange { - *self = Self::NewEntries; - } - } -} - -// Changes detected locally which eventually can be applied and upload to network. -#[derive(Default)] -pub(super) struct ChangesToApply { - pub folders: Folders, - pub mutations: Vec, -} - -// Type of mutation detected locally. -#[derive(Debug)] -pub(super) enum Mutation { - NewFile(MetadataTrackingInfo), - FileRemoved((PathBuf, XorName)), - FileContentChanged((XorName, MetadataTrackingInfo)), - NewFolder(MetadataTrackingInfo), - FolderRemoved((PathBuf, XorName)), -} - -impl fmt::Display for Mutation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::NewFile(tracking_info) => { - write!(f, "New file: {:?}", tracking_info.file_path) - } - Self::FileRemoved((path, _)) => write!(f, "File removed: {path:?}"), - Self::FileContentChanged((_, tracking_info)) => { - write!(f, "File content changed: {:?}", tracking_info.file_path) - } - Self::NewFolder(tracking_info) => { - write!(f, "New folder: {:?}", tracking_info.file_path) - } - Self::FolderRemoved((path, _)) => write!(f, "Folder removed: {path:?}"), - } - } -} - -// Information stored locally to keep track of local changes to files/folders. -// TODO: to make file changes discovery more efficient, and prevent chunking for -// such purposes, add more info like file size and last modified timestamp. 
-#[derive(Debug, Serialize, Deserialize, PartialEq)] -pub(super) struct MetadataTrackingInfo { - pub file_path: PathBuf, - pub meta_xorname: XorName, - pub metadata: Metadata, - pub entry_hash: EntryHash, -} - -// Build absolute paths for the different dirs to be used for locally tracking changes -pub(super) fn build_tracking_info_paths(path: &Path) -> Result<(PathBuf, PathBuf, PathBuf)> { - let files_dir = path.to_path_buf().canonicalize()?; - let tracking_info_dir = files_dir.join(SAFE_TRACKING_CHANGES_DIR); - let meta_dir = tracking_info_dir.join(METADATA_CACHE_DIR); - create_dir_all(&meta_dir) - .map_err(|err| eyre!("The path provided needs to be a directory: {err}"))?; - - Ok((files_dir, tracking_info_dir, meta_dir)) -} - -pub(super) fn read_tracking_info_from_disk( - meta_dir: &Path, -) -> Result> { - let mut curr_tracking_info = BTreeMap::new(); - for entry in WalkDir::new(meta_dir) - .into_iter() - .flatten() - .filter(|e| e.file_type().is_file() && e.file_name() != ROOT_FOLDER_METADATA_FILENAME) - { - let path = entry.path(); - let bytes = std::fs::read(path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err}"))?; - let tracking_info: MetadataTrackingInfo = rmp_serde::from_slice(&bytes) - .map_err(|err| eyre!("Error while deserializing tracking info from {path:?}: {err}"))?; - - curr_tracking_info.insert(tracking_info.file_path.clone(), tracking_info); - } - - Ok(curr_tracking_info) -} - -// Store tracking info about the root folder in a file to keep track of any changes made -pub(super) fn store_root_folder_tracking_info( - meta_dir: &Path, - root_folder_addr: RegisterAddress, - created: bool, -) -> Result<()> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let mut meta_file = File::create(path)?; - meta_file.write_all(&rmp_serde::to_vec(&(root_folder_addr, created))?)?; - - Ok(()) -} - -// Store the given root seed/SK on disk, (optionally) encrypted with a password -pub(super) fn store_root_sk( - dir: &Path, 
- root_sk: &MainSecretKey, - password: Option<&[u8]>, -) -> Result<()> { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut secret_file = File::create(path)?; - let seed_bytes = root_sk.to_bytes(); - - if let Some(pwd) = password { - // encrypt the SK with the (hashed) password - let key = encryption_key_from_hashed_password(pwd); - - let pos = seed_bytes.len(); - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..pos].copy_from_slice(&seed_bytes); - - // IV is randomly chosen and prefixed it to cipher - let mut rng = rand::thread_rng(); - let random_iv: [u8; IV_LENGTH] = rng.gen(); - let mut iv_with_cipher = vec![]; - iv_with_cipher.extend(random_iv); - - let cipher = Aes256Cbc::new_from_slices(&key, &random_iv)?; - let ciphertext = cipher.encrypt(&mut buffer, pos)?; - iv_with_cipher.extend(ciphertext); - - secret_file.write_all(&iv_with_cipher)?; - } else { - secret_file.write_all(&seed_bytes)?; - } - - Ok(()) -} - -// Read the root seed/SK from disk, (optionally) decrypting it with a password -pub(super) fn read_root_sk(dir: &Path, password: Option<&[u8]>) -> Result { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut bytes = std::fs::read(&path).map_err(|err| { - eyre!("Error while reading the recovery seed/secret from {path:?}: {err:?}") - })?; - - if let Some(pwd) = password { - // decrypt the SK with the (hashed) password - if bytes.len() < IV_LENGTH + AES_BUFFER_LENGTH { - bail!( - "Not enough bytes found on disk ({}) to decrypt the recovery seed", - bytes.len() - ); - } - - // the IV is prefixed - let mut iv = [0u8; IV_LENGTH]; - iv[..IV_LENGTH].copy_from_slice(&bytes[..IV_LENGTH]); - - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..48].copy_from_slice(&bytes[IV_LENGTH..]); - - let key = encryption_key_from_hashed_password(pwd); - let cipher = Aes256Cbc::new_from_slices(&key, &iv)?; - bytes = cipher - .decrypt_vec(&buffer) - .map_err(|_| eyre!("Failed to decrypt the recovery seed with the provided password"))?; - } - - if bytes.len() 
!= SK_SIZE { - bail!( - "The length of bytes read from disk ({}) doesn't match a recovery seed's length ({SK_SIZE})", bytes.len() - ); - } - let mut seed_bytes = [0u8; SK_SIZE]; - seed_bytes[..SK_SIZE].copy_from_slice(&bytes); - let sk = MainSecretKey::new(SecretKey::from_bytes(seed_bytes)?); - - Ok(sk) -} - -fn encryption_key_from_hashed_password(password: &[u8]) -> [u8; 32] { - let mut key = [0; 32]; - let mut hasher = Sha3::v256(); - hasher.update(password); - hasher.finalize(&mut key); - key -} - -// Read the tracking info about the root folder -pub(super) fn read_root_folder_addr(meta_dir: &Path) -> Result<(RegisterAddress, bool)> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let bytes = std::fs::read(&path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err:?}"))?; - - Ok(rmp_serde::from_slice(&bytes)?) -} diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs deleted file mode 100644 index 0ac03d458b..0000000000 --- a/sn_cli/src/bin/main.rs +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[macro_use] -extern crate tracing; - -mod subcommands; - -use subcommands::{ - files::files_cmds, - folders::folders_cmds, - register::register_cmds, - wallet::{ - hot_wallet::{wallet_cmds, wallet_cmds_without_client, WalletCmds}, - wo_wallet::{wo_wallet_cmds, wo_wallet_cmds_without_client, WatchOnlyWalletCmds}, - }, - Opt, SubCmd, -}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::Result; -use indicatif::ProgressBar; -use sn_client::transfers::bls_secret_from_hex; -use sn_client::{Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}; -#[cfg(feature = "metrics")] -use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat}; -use std::{io, path::PathBuf, time::Duration}; -use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; - -const CLIENT_KEY: &str = "clientkey"; - -#[tokio::main] -async fn main() -> Result<()> { - color_eyre::install()?; - let opt = Opt::parse(); - let logging_targets = vec![ - // TODO: Reset to nice and clean defaults once we have a better idea of what we want - ("sn_networking".to_string(), Level::INFO), - ("safe".to_string(), Level::TRACE), - ("sn_build_info".to_string(), Level::TRACE), - ("autonomi".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_logging".to_string(), Level::TRACE), - ("sn_peers_acquisition".to_string(), Level::TRACE), - ("sn_protocol".to_string(), Level::TRACE), - ("sn_registers".to_string(), Level::TRACE), - ("sn_transfers".to_string(), Level::TRACE), - ]; - let mut log_builder = LogBuilder::new(logging_targets); - log_builder.output_dest(opt.log_output_dest); - log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); - let _log_handles = log_builder.initialize()?; - - #[cfg(feature = "metrics")] - tokio::spawn(init_metrics(std::process::id())); - - // Log the full command that was run - info!("\"{}\"", std::env::args().collect::>().join(" ")); - - debug!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - 
println!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - - let client_data_dir_path = get_client_data_dir_path()?; - // Perform actions that do not require us connecting to the network and return early - if let SubCmd::Wallet(cmds) = &opt.cmd { - if let WalletCmds::Address { .. } - | WalletCmds::Balance { .. } - | WalletCmds::Create { .. } - | WalletCmds::Sign { .. } - | WalletCmds::Status { .. } - | WalletCmds::Encrypt { .. } = cmds - { - wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - if let SubCmd::WatchOnlyWallet(cmds) = &opt.cmd { - if let WatchOnlyWalletCmds::Addresses - | WatchOnlyWalletCmds::Balance { .. } - | WatchOnlyWalletCmds::Deposit { .. } - | WatchOnlyWalletCmds::Create { .. } - | WatchOnlyWalletCmds::Transaction { .. } = cmds - { - wo_wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - println!("Instantiating a SAFE client..."); - let secret_key = get_client_secret_key(&client_data_dir_path)?; - - let bootstrap_peers = opt.peers.get_peers().await?; - - println!( - "Connecting to the network with {} peers", - bootstrap_peers.len(), - ); - - let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided - None - } else { - Some(bootstrap_peers) - }; - - // get the broadcaster as we want to have our own progress bar. 
- let broadcaster = ClientEventsBroadcaster::default(); - let (progress_bar, progress_bar_handler) = - spawn_connection_progress_bar(broadcaster.subscribe()); - - let result = Client::new( - secret_key, - bootstrap_peers, - opt.connection_timeout, - Some(broadcaster), - ) - .await; - let client = match result { - Ok(client) => client, - Err(err) => { - // clean up progress bar - progress_bar.finish_with_message("Could not connect to the network"); - return Err(err.into()); - } - }; - progress_bar_handler.await?; - - // default to verifying storage - let should_verify_store = !opt.no_verify; - - // PowerShell seems having issue to showing the unwrapped error - // Hence capture the result and print it out explicity. - let cmd_str = format!("{:?}", opt.cmd); - let result = match opt.cmd { - SubCmd::Wallet(cmds) => { - wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - SubCmd::WatchOnlyWallet(cmds) => { - wo_wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - SubCmd::Files(cmds) => { - files_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - SubCmd::Folders(cmds) => { - folders_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - SubCmd::Register(cmds) => { - register_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - }; - println!("Completed with {result:?} of execute {cmd_str:?}"); - - Ok(()) -} - -/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the -/// client successfully connects to the network or if it errors out. 
-fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) { - // Network connection progress bar - let progress_bar = ProgressBar::new_spinner(); - let progress_bar_clone = progress_bar.clone(); - progress_bar.enable_steady_tick(Duration::from_millis(120)); - progress_bar.set_message("Connecting to The SAFE Network..."); - let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); - progress_bar.set_style(new_style); - - progress_bar.set_message("Connecting to The SAFE Network..."); - - let handle = tokio::spawn(async move { - let mut peers_connected = 0; - loop { - match rx.recv().await { - Ok(ClientEvent::ConnectedToNetwork) => { - progress_bar.finish_with_message("Connected to the Network"); - break; - } - Ok(ClientEvent::PeerAdded { - max_peers_to_connect, - }) => { - peers_connected += 1; - progress_bar.set_message(format!( - "{peers_connected}/{max_peers_to_connect} initial peers found.", - )); - } - Err(RecvError::Lagged(_)) => { - // Even if the receiver is lagged, we would still get the ConnectedToNetwork during each new - // connection. Thus it would be okay to skip this error. - } - Err(RecvError::Closed) => { - progress_bar.finish_with_message("Could not connect to the network"); - break; - } - _ => {} - } - } - }); - (progress_bar_clone, handle) -} - -fn get_client_secret_key(root_dir: &PathBuf) -> Result { - // create the root directory if it doesn't exist - std::fs::create_dir_all(root_dir)?; - let key_path = root_dir.join(CLIENT_KEY); - let secret_key = if key_path.is_file() { - info!("Client key found. Loading from file..."); - let secret_hex_bytes = std::fs::read(key_path)?; - bls_secret_from_hex(secret_hex_bytes)? - } else { - info!("No key found. 
Generating a new client key..."); - let secret_key = SecretKey::random(); - std::fs::write(key_path, hex::encode(secret_key.to_bytes()))?; - secret_key - }; - Ok(secret_key) -} - -fn get_client_data_dir_path() -> Result { - let mut home_dirs = dirs_next::data_dir().expect("Data directory is obtainable"); - home_dirs.push("safe"); - home_dirs.push("client"); - std::fs::create_dir_all(home_dirs.as_path())?; - Ok(home_dirs) -} - -fn get_stdin_response(prompt: &str) -> String { - println!("{prompt}"); - let mut buffer = String::new(); - let stdin = io::stdin(); - if stdin.read_line(&mut buffer).is_err() { - // consider if error should process::exit(1) here - return "".to_string(); - }; - // Remove leading and trailing whitespace - buffer.trim().to_owned() -} - -fn get_stdin_password_response(prompt: &str) -> String { - rpassword::prompt_password(prompt) - .map(|v| v.trim().to_owned()) - .unwrap_or("".to_string()) -} - -#[cfg(test)] -mod tests { - use crate::subcommands::wallet::hot_wallet::{wallet_cmds_without_client, WalletCmds}; - use crate::subcommands::wallet::WalletApiHelper; - use bls::SecretKey; - use color_eyre::Result; - use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; - use sn_client::transfers::HotWallet; - use std::path::Path; - - fn create_wallet(root_dir: &Path, derivation_passphrase: Option) -> Result { - let mnemonic = load_or_create_mnemonic(root_dir)?; - let secret_key = secret_key_from_mnemonic(mnemonic, derivation_passphrase)?; - let wallet = HotWallet::create_from_key(root_dir, secret_key, None)?; - Ok(wallet) - } - - #[tokio::test] - async fn test_wallet_address_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - // Create wallet - let _wallet = create_wallet(&root_dir, None).expect("Could not create wallet"); - - let cmds = WalletCmds::Address; - - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - 
assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_address_command_should_fail_with_no_existing_wallet() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let client_data_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Address; - - // Runs command without a wallet being present, thus should fail - let result = wallet_cmds_without_client(&cmds, &client_data_dir).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_wallet_create_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: None, - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_create_command_with_hex_key() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let secret_key = SecretKey::random(); - let secret_key_hex = secret_key.to_hex(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: Some(secret_key_hex), - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - - if let WalletApiHelper::HotWallet(wallet) = result.expect("No valid wallet found") { - // Compare public addresses (secret keys are the same if the public addresses are) - assert_eq!(wallet.address().to_hex(), 
secret_key.public_key().to_hex()); - } else { - panic!("Did not expect a watch only wallet"); - } - } -} diff --git a/sn_cli/src/bin/subcommands/files.rs b/sn_cli/src/bin/subcommands/files.rs deleted file mode 100644 index 2bc3a26fed..0000000000 --- a/sn_cli/src/bin/subcommands/files.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Help, Result, -}; -use sn_cli::{ - download_file, download_files, ChunkManager, Estimator, FilesUploader, UploadedFile, - UPLOADED_FILES, -}; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - UploadCfg, -}; -use sn_client::{Client, FilesApi, BATCH_SIZE}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, -}; -use walkdir::WalkDir; -use xor_name::XorName; - -#[derive(Parser, Debug)] -pub enum FilesCmds { - Estimate { - /// The location of the file(s) to upload. Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - path: PathBuf, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - }, - Upload { - /// The location of the file(s) to upload. - /// - /// Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - file_path: PathBuf, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. 
- #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - Download { - /// The name to apply to the downloaded file. - /// - /// If the name argument is used, the address argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "name")] - file_name: Option, - /// The hex address of a file. - /// - /// If the address argument is used, the name argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "address")] - file_addr: Option, - /// Flagging whether to show the holders of the uploaded chunks. - /// Default to be not showing. - #[clap(long, name = "show_holders", default_value = "false")] - show_holders: bool, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. 
Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn files_cmds( - cmds: FilesCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FilesCmds::Estimate { - path, - make_data_public, - } => { - let files_api = FilesApi::build(client.clone(), root_dir.to_path_buf())?; - let chunk_manager = ChunkManager::new(root_dir); - Estimator::new(chunk_manager, files_api) - .estimate_cost(path, make_data_public, root_dir) - .await? - } - FilesCmds::Upload { - file_path, - batch_size, - retry_strategy, - make_data_public, - } => { - let files_count = count_files_in_path_recursively(&file_path); - - if files_count == 0 { - if file_path.is_dir() { - bail!( - "The directory specified for upload is empty. \ - Please verify the provided path." - ); - } else { - bail!("The provided file path is invalid. Please verify the path."); - } - } - let upload_cfg = UploadCfg { - batch_size, - verify_store, - retry_strategy, - ..Default::default() - }; - let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf()) - .set_make_data_public(make_data_public) - .set_upload_cfg(upload_cfg) - .insert_path(&file_path); - - let _summary = files_uploader.start_upload().await?; - } - FilesCmds::Download { - file_name, - file_addr, - show_holders, - batch_size, - retry_strategy, - } => { - if (file_name.is_some() && file_addr.is_none()) - || (file_addr.is_some() && file_name.is_none()) - { - return Err( - eyre!("Both the name and address must be supplied if either are used") - .suggestion( - "Please run the command again in the form 'files download
'", - ), - ); - } - - let mut download_dir = root_dir.to_path_buf(); - let mut download_file_name = file_name.clone(); - if let Some(file_name) = file_name { - // file_name may direct the downloaded data to: - // - // the current directory (just a filename) - // eg safe files download myfile.txt ADDRESS - // - // a directory relative to the current directory (relative filename) - // eg safe files download my/relative/path/myfile.txt ADDRESS - // - // a directory relative to root of the filesystem (absolute filename) - // eg safe files download /home/me/mydir/myfile.txt ADDRESS - let file_name_path = Path::new(&file_name); - if file_name_path.is_dir() { - return Err(eyre!("Cannot download file to path: {:?}", file_name)); - } - let file_name_dir = file_name_path.parent(); - if file_name_dir.is_none() { - // just a filename, use the current_dir - download_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - } else if file_name_path.is_relative() { - // relative to the current directory. 
Make the relative path - // into an absolute path by joining it to current_dir - if let Some(relative_dir) = file_name_dir { - let current_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - download_dir = current_dir.join(relative_dir); - if !download_dir.exists() { - return Err(eyre!("Directory does not exist: {:?}", download_dir)); - } - if let Some(path_file_name) = file_name_path.file_name() { - download_file_name = Some(OsString::from(path_file_name)); - } - } - } else { - // absolute dir - download_dir = file_name_dir.unwrap_or(root_dir).to_path_buf(); - } - } - let files_api: FilesApi = FilesApi::new(client.clone(), download_dir.clone()); - - match (download_file_name, file_addr) { - (Some(download_file_name), Some(address_provided)) => { - let bytes = - hex::decode(&address_provided).expect("Input address is not a hex string"); - let xor_name_provided = XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - // try to read the data_map if it exists locally. - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let expected_data_map_location = uploaded_files_path.join(address_provided); - let local_data_map = { - if expected_data_map_location.exists() { - let uploaded_file_metadata = - UploadedFile::read(&expected_data_map_location)?; - - uploaded_file_metadata.data_map.map(|bytes| Chunk { - address: ChunkAddress::new(xor_name_provided), - value: bytes, - }) - } else { - None - } - }; - - download_file( - files_api, - xor_name_provided, - (download_file_name, local_data_map), - &download_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await - } - _ => { - println!("Attempting to download all files uploaded by the current user..."); - download_files( - &files_api, - root_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await? 
- } - } - } - } - Ok(()) -} - -fn count_files_in_path_recursively(file_path: &PathBuf) -> u32 { - let entries_iterator = WalkDir::new(file_path).into_iter().flatten(); - let mut count = 0; - - entries_iterator.for_each(|entry| { - if entry.file_type().is_file() { - count += 1; - } - }); - count -} diff --git a/sn_cli/src/bin/subcommands/folders.rs b/sn_cli/src/bin/subcommands/folders.rs deleted file mode 100644 index 705b746459..0000000000 --- a/sn_cli/src/bin/subcommands/folders.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use sn_cli::AccountPacket; - -use sn_client::{ - protocol::storage::RetryStrategy, transfers::MainSecretKey, Client, UploadCfg, BATCH_SIZE, -}; - -use bls::{SecretKey, SK_SIZE}; -use clap::Parser; -use color_eyre::{eyre::bail, Result}; -use dialoguer::Password; -use std::{ - env::current_dir, - path::{Path, PathBuf}, -}; - -#[derive(Parser, Debug)] -pub enum FoldersCmds { - Init { - /// The directory to initialise as a root folder, which can then be stored on the network (and kept in sync with). - /// By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - }, - Download { - /// The full local path where to download the folder. 
By default the current path is assumed, - /// and the main Folder's network address will be used as the folder name. - #[clap(name = "target folder path")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - /// Report any changes made to local version of files/folders (this doesn't compare it with their versions stored on the network). - Status { - /// Path to check changes made on. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - }, - /// Sync up local files/folders changes with their versions stored on the network. - Sync { - /// Path to sync with its remote version on the network. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. - #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the files be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. 
- /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Balanced, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn folders_cmds( - cmds: FoldersCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FoldersCmds::Init { path, root_sk } => { - let path = get_path(path, None)?; - // initialise path as a fresh new Folder with a network address derived from the root SK - let root_sk = get_recovery_secret_sk(root_sk, true)?; - let acc_packet = AccountPacket::init(client.clone(), root_dir, &path, &root_sk, None)?; - println!("Directory at {path:?} initialised as a root Folder, ready to track and sync changes with the network at address: {}", acc_packet.root_folder_addr().to_hex()) - } - FoldersCmds::Download { - path, - root_sk, - batch_size, - retry_strategy, - } => { - let root_sk = get_recovery_secret_sk(root_sk, false)?; - let root_sk_hex = root_sk.main_pubkey().to_hex(); - let download_folder_name = format!( - "folder_{}_{}", - &root_sk_hex[..6], - &root_sk_hex[root_sk_hex.len() - 6..] 
- ); - let download_folder_path = get_path(path, Some(&download_folder_name))?; - println!("Downloading onto {download_folder_path:?}, with batch-size {batch_size}"); - debug!("Downloading onto {download_folder_path:?}"); - - let _ = AccountPacket::retrieve_folders( - client, - root_dir, - &root_sk, - None, - &download_folder_path, - batch_size, - retry_strategy, - ) - .await?; - } - FoldersCmds::Status { path } => { - let path = get_path(path, None)?; - let acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - acc_packet.status()?; - } - FoldersCmds::Sync { - path, - batch_size, - make_data_public, - retry_strategy, - } => { - let path = get_path(path, None)?; - let mut acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - - let options = UploadCfg { - verify_store, - batch_size, - retry_strategy, - ..Default::default() - }; - acc_packet.sync(options, make_data_public).await?; - } - } - Ok(()) -} - -// Unwrap provided path, or return the current path if none was provided. -// It can optionally be provided a string to adjoin when the current dir is returned. -fn get_path(path: Option, to_join: Option<&str>) -> Result { - let path = if let Some(path) = path { - path - } else { - let current_dir = current_dir()?; - to_join.map_or_else(|| current_dir.clone(), |str| current_dir.join(str)) - }; - Ok(path) -} - -// Either get a hex-encoded SK entered by the user, or generate a new one -// TODO: get/generate a mnemonic instead -fn get_recovery_secret_sk( - root_sk: Option, - gen_new_recovery_secret: bool, -) -> Result { - let result = if let Some(str) = root_sk { - SecretKey::from_hex(&str) - } else { - let prompt_msg = if gen_new_recovery_secret { - println!( - "\n\nA recovery secret is required to derive signing/encryption keys, and network addresses, \ - used by an Account Packet." 
- ); - println!( - "The recovery secret used to initialise an Account Packet, can be used to retrieve and restore \ - a new replica/clone from the network, onto any local path and even onto another device.\n" - ); - - "Please enter your recovery secret for this new Account Packet,\nif you don't have one, \ - press [Enter] to generate one" - } else { - "Please enter your recovery secret" - }; - - let err_msg = format!("Hex-encoded recovery secret must be {} long", 2 * SK_SIZE); - let sk_hex = Password::new() - .with_prompt(prompt_msg) - .allow_empty_password(gen_new_recovery_secret) - .validate_with(|input: &String| -> Result<(), &str> { - let len = input.chars().count(); - if len == 0 || len == 2 * SK_SIZE { - Ok(()) - } else { - Err(&err_msg) - } - }) - .interact()?; - - println!(); - if sk_hex.is_empty() { - println!("Generating your recovery secret..."); - let sk = SecretKey::random(); - println!("\n*** Recovery secret generated ***\n{}", sk.to_hex()); - println!(); - println!( - "Please *MAKE SURE YOU DON'T LOOSE YOU RECOVERY SECRET*, and always sync up local changes \ - made to your Account Packet with the remote replica on the network to not loose them either.\n" - ); - - Ok(sk) - } else { - SecretKey::from_hex(&sk_hex) - } - }; - - match result { - Ok(sk) => Ok(MainSecretKey::new(sk)), - Err(err) => bail!("Failed to decode the recovery secret: {err:?}"), - } -} diff --git a/sn_cli/src/bin/subcommands/register.rs b/sn_cli/src/bin/subcommands/register.rs deleted file mode 100644 index 675e1ae6c5..0000000000 --- a/sn_cli/src/bin/subcommands/register.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::PublicKey; -use clap::Subcommand; -use color_eyre::{eyre::WrapErr, Result, Section}; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::protocol::storage::RegisterAddress; -use sn_client::registers::Permissions; - -use sn_client::{Client, Error as ClientError, WalletClient}; -use std::path::Path; -use xor_name::XorName; - -#[derive(Subcommand, Debug)] -pub enum RegisterCmds { - /// Create a new register with a name. - Create { - /// The name of the register to create. This could be the app's name. - /// This is used along with your public key to derive the address of the register - #[clap(name = "name", short = 'n')] - name: String, - - /// Create the register with public write access. - /// By default only the owner can write to the register. - #[clap(name = "public", short = 'p')] - public: bool, - }, - Edit { - /// The address of the register to edit. - #[clap(name = "address")] - address: String, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register name instead of the address - #[clap(name = "name", short = 'n')] - use_name: bool, - /// The entry to add to the register. - #[clap(name = "entry")] - entry: String, - }, - Get { - /// The register addresses to get. 
- #[clap(name = "addresses")] - addresses: Vec, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register names instead of the addresses - #[clap(name = "name", short = 'n')] - use_name: bool, - }, -} - -pub(crate) async fn register_cmds( - cmds: RegisterCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - RegisterCmds::Create { name, public } => { - create_register(name, public, client, root_dir, verify_store).await? - } - RegisterCmds::Edit { - address, - use_name, - entry, - } => edit_register(address, use_name, entry, client, verify_store).await?, - RegisterCmds::Get { - addresses, - use_name, - } => get_registers(addresses, use_name, client).await?, - } - Ok(()) -} - -async fn create_register( - name: String, - public: bool, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - trace!("Starting to pay for Register storage"); - - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None) - .wrap_err("Unable to read wallet file in {path:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. 
Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - let meta = XorName::from_content(name.as_bytes()); - let perms = match public { - true => Permissions::new_anyone_can_write(), - false => Permissions::default(), - }; - let (register, storage_cost, royalties_fees) = client - .create_and_pay_for_register(meta, &mut wallet_client, verify_store, perms) - .await?; - - if storage_cost.is_zero() { - println!("Register '{name}' already exists!",); - } else { - println!( - "Successfully created register '{name}' for {storage_cost:?} (royalties fees: {royalties_fees:?})!", - ); - } - - println!("REGISTER_ADDRESS={}", register.address().to_hex()); - - Ok(()) -} - -async fn edit_register( - address_str: String, - use_name: bool, - entry: String, - client: &Client, - verify_store: bool, -) -> Result<()> { - let (address, printing_name) = parse_addr(&address_str, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register from {address}"); - - match client.get_register(address).await { - Ok(mut register) => { - println!("Successfully retrieved Register {printing_name}",); - println!("Editing Register {printing_name} with: {entry}"); - match register.write_online(entry.as_bytes(), verify_store).await { - Ok(()) => {} - Err(ref err @ ClientError::ContentBranchDetected(ref branches)) => { - println!( - "We need to merge {} branches in Register entries: {err}", - branches.len() - ); - register - .write_merging_branches_online(entry.as_bytes(), verify_store) - .await?; - } - Err(err) => return Err(err.into()), - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! 
{error}" - ); - return Err(error.into()); - } - } - - Ok(()) -} - -async fn get_registers(addresses: Vec, use_name: bool, client: &Client) -> Result<()> { - for addr in addresses { - let (address, printing_name) = parse_addr(&addr, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register {printing_name}"); - - match client.get_register(address).await { - Ok(register) => { - println!("Successfully retrieved Register {printing_name}"); - let entries = register.read(); - println!("Register entries:"); - for (hash, bytes) in entries { - let data_str = match String::from_utf8(bytes.clone()) { - Ok(data_str) => data_str, - Err(_) => format!("{bytes:?}"), - }; - println!("{hash:?}: {data_str}"); - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! {error}" - ); - return Err(error.into()); - } - } - } - - Ok(()) -} - -/// Parse str and return the address and the register info for printing -fn parse_addr( - address_str: &str, - use_name: bool, - pk: PublicKey, -) -> Result<(RegisterAddress, String)> { - if use_name { - debug!("Parsing address as name"); - let user_metadata = XorName::from_content(address_str.as_bytes()); - let addr = RegisterAddress::new(user_metadata, pk); - Ok((addr, format!("'{address_str}' at {addr}"))) - } else { - debug!("Parsing address as hex"); - let addr = RegisterAddress::from_hex(address_str) - .wrap_err("Could not parse hex string") - .suggestion( - "If getting a register by name, use the `-n` flag eg:\n - safe register get -n ", - )?; - Ok((addr, format!("at {address_str}"))) - } -} diff --git a/sn_cli/src/bin/subcommands/wallet.rs b/sn_cli/src/bin/subcommands/wallet.rs deleted file mode 100644 index 0392c81874..0000000000 --- a/sn_cli/src/bin/subcommands/wallet.rs +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod audit; -pub(crate) mod helpers; -pub(crate) mod hot_wallet; -pub(crate) mod wo_wallet; - -use sn_client::transfers::{CashNote, HotWallet, MainPubkey, NanoTokens, WatchOnlyWallet}; -use sn_protocol::storage::SpendAddress; - -use crate::get_stdin_password_response; -use color_eyre::Result; -use std::{collections::BTreeSet, io::Read, path::Path}; - -// TODO: convert this into a Trait part of the wallet APIs. -pub(crate) enum WalletApiHelper { - WatchOnlyWallet(WatchOnlyWallet), - HotWallet(HotWallet), -} - -impl WalletApiHelper { - pub fn watch_only_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - Ok(Self::WatchOnlyWallet(wallet)) - } - - pub fn load_from(root_dir: &Path) -> Result { - let wallet = if HotWallet::is_encrypted(root_dir) { - println!("Wallet is encrypted. It needs a password to unlock."); - let password = get_stdin_password_response("Enter password: "); - let mut wallet = HotWallet::load_encrypted_from_path(root_dir, password.to_owned())?; - // Authenticate so that a user doesn't have to immediately provide the password again - wallet.authenticate_with_password(password)?; - wallet - } else { - HotWallet::load_from(root_dir)? 
- }; - - Ok(Self::HotWallet(wallet)) - } - - pub fn encrypt(root_dir: &Path, password: &str) -> Result<()> { - HotWallet::encrypt(root_dir, password)?; - Ok(()) - } - - pub fn balance(&self) -> NanoTokens { - match self { - Self::WatchOnlyWallet(w) => w.balance(), - Self::HotWallet(w) => w.balance(), - } - } - - pub fn status(&mut self) -> Result<()> { - self.authenticate()?; - - match self { - Self::WatchOnlyWallet(_) => Ok(()), - Self::HotWallet(w) => { - println!("Unconfirmed spends are:"); - for spend in w.unconfirmed_spend_requests().iter() { - let address = SpendAddress::from_unique_pubkey(&spend.spend.unique_pubkey); - println!( - "Unconfirmed spend {address:?} - {:?}, hex_str: {:?}", - spend.spend.unique_pubkey, - address.to_hex() - ); - println!( - "reason {:?}, amount {}, inputs: {}, outputs: {}", - spend.spend.reason, - spend.spend.amount(), - spend.spend.ancestors.len(), - spend.spend.descendants.len() - ); - println!("Inputs in hex str:"); - for input in spend.spend.ancestors.iter() { - let address = SpendAddress::from_unique_pubkey(input); - println!("Input spend {}", address.to_hex()); - } - println!("Outputs in hex str:"); - for (output, amount) in spend.spend.descendants.iter() { - let address = SpendAddress::from_unique_pubkey(output); - println!("Output {} with {amount}", address.to_hex()); - } - } - println!("Available cash notes are:"); - if let Ok(available_cnrs) = w.available_cash_notes() { - for cnr in available_cnrs.0.iter() { - println!("{cnr:?}"); - } - } - - Ok(()) - } - } - } - - pub fn read_cash_note_from_stdin(&mut self) -> Result<()> { - println!("Please paste your CashNote below:"); - let mut input = String::new(); - std::io::stdin().read_to_string(&mut input)?; - self.deposit_from_cash_note_hex(&input) - } - - pub fn deposit_from_cash_note_hex(&mut self, input: &str) -> Result<()> { - let cash_note = CashNote::from_hex(input.trim())?; - - let old_balance = self.balance(); - let cash_notes = vec![cash_note.clone()]; - - let 
spent_unique_pubkeys: BTreeSet<_> = cash_note - .parent_spends - .iter() - .map(|spend| spend.unique_pubkey()) - .collect(); - - match self { - Self::WatchOnlyWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - Self::HotWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - } - let new_balance = self.balance(); - println!("Successfully stored cash_note to wallet dir. \nOld balance: {old_balance}\nNew balance: {new_balance}"); - - Ok(()) - } - - pub fn deposit(&mut self, read_from_stdin: bool, cash_note: Option<&str>) -> Result<()> { - if read_from_stdin { - return self.read_cash_note_from_stdin(); - } - - if let Some(cash_note_hex) = cash_note { - return self.deposit_from_cash_note_hex(cash_note_hex); - } - - let previous_balance = self.balance(); - - self.try_load_cash_notes()?; - - let deposited = NanoTokens::from(self.balance().as_nano() - previous_balance.as_nano()); - if deposited.is_zero() { - println!("Nothing deposited."); - } else if let Err(err) = self.deposit_and_store_to_disk(&vec![]) { - println!("Failed to store deposited ({deposited}) amount: {err:?}"); - } else { - println!("Deposited {deposited}."); - } - - Ok(()) - } - - fn deposit_and_store_to_disk(&mut self, cash_notes: &Vec) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - Self::HotWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - } - Ok(()) - } - - fn try_load_cash_notes(&mut self) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.try_load_cash_notes()?, - Self::HotWallet(w) => w.try_load_cash_notes()?, - } - Ok(()) - } - - /// Authenticate with password for encrypted wallet. 
- fn authenticate(&mut self) -> Result<()> { - match self { - WalletApiHelper::WatchOnlyWallet(_) => Ok(()), - WalletApiHelper::HotWallet(w) => { - if w.authenticate().is_err() { - let password = get_stdin_password_response("Wallet password: "); - w.authenticate_with_password(password)?; - Ok(()) - } else { - Ok(()) - } - } - } - } -} - -fn watch_only_wallet_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let pk_hex = main_pk.to_hex(); - let folder_name = format!("pk_{}_{}", &pk_hex[..6], &pk_hex[pk_hex.len() - 6..]); - let wallet_dir = root_dir.join(folder_name); - println!( - "Loading watch-only local wallet from: {}", - wallet_dir.display() - ); - let wallet = WatchOnlyWallet::load_from(&wallet_dir, main_pk)?; - Ok(wallet) -} diff --git a/sn_cli/src/bin/subcommands/wallet/audit.rs b/sn_cli/src/bin/subcommands/wallet/audit.rs deleted file mode 100644 index c0e3833d50..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/audit.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use std::path::Path; -use std::str::FromStr; - -use bls::SecretKey; -use color_eyre::eyre::bail; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::{CashNoteRedemption, SpendAddress, Transfer, GENESIS_SPEND_UNIQUE_KEY}; -use sn_client::{Client, SpendDag}; - -const SPEND_DAG_FILENAME: &str = "spend_dag"; -const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; - -async fn step_by_step_spend_dag_gathering(client: &Client, mut dag: SpendDag) -> Result { - let start_time = std::time::Instant::now(); - println!("Gathering the Spend DAG, note that this might take a very long time..."); - let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE); - tokio::spawn(async move { - let mut spend_count = 0; - let mut exponential = 64; - while let Some(_spend) = rx.recv().await { - spend_count += 1; - if spend_count % exponential == 0 { - println!("Collected {spend_count} spends..."); - exponential *= 2; - } - } - }); - - client - .spend_dag_continue_from_utxos(&mut dag, Some(tx), false) - .await; - println!("Done gathering the Spend DAG in {:?}", start_time.elapsed()); - - // verify the DAG - if let Err(e) = dag.record_faults(&dag.source()) { - println!("DAG verification failed: {e}"); - } else { - let faults_len = dag.faults().len(); - println!("DAG verification successful, identified {faults_len} faults.",); - if faults_len > 0 { - println!("Logging identified faults: {:#?}", dag.faults()); - } - } - Ok(dag) -} - -/// Gather the Spend DAG from the Network and store it on disk -/// If a DAG is found on disk, it will continue from it -/// If fast_mode is true, gathers in a silent and fast way -/// else enjoy a step by step slow narrated gathering -async fn gather_spend_dag(client: &Client, root_dir: &Path, fast_mode: bool) -> Result { - let dag_path = root_dir.join(SPEND_DAG_FILENAME); - let inital_dag = match SpendDag::load_from_file(&dag_path) { - Ok(mut dag) => { - println!("Found a 
local spend dag on disk, continuing from it..."); - if fast_mode { - client - .spend_dag_continue_from_utxos(&mut dag, None, false) - .await; - } - dag - } - Err(err) => { - println!("Starting from Genesis as found no local spend dag on disk..."); - info!("Starting from Genesis as failed to load spend dag from disk: {err}"); - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - if fast_mode { - client - .spend_dag_build_from(genesis_addr, None, true) - .await? - } else { - client.new_dag_with_genesis_only().await? - } - } - }; - - let dag = match fast_mode { - true => inital_dag, - false => step_by_step_spend_dag_gathering(client, inital_dag).await?, - }; - - println!("Saving DAG to disk at: {dag_path:?}"); - dag.dump_to_file(dag_path)?; - - Ok(dag) -} - -pub async fn audit( - client: &Client, - to_dot: bool, - royalties: bool, - root_dir: &Path, - foundation_sk: Option, -) -> Result<()> { - let fast_mode = to_dot || royalties || foundation_sk.is_some(); - let dag = gather_spend_dag(client, root_dir, fast_mode).await?; - - if to_dot { - println!("========================== spends DAG digraph =========================="); - println!("{}", dag.dump_dot_format()); - } - if let Some(sk) = foundation_sk { - println!( - "========================== payment forward statistics ==========================" - ); - println!("{}", dag.dump_payment_forward_statistics(&sk)); - } - if royalties { - let royalties = dag.all_royalties()?; - redeem_royalties(royalties, client, root_dir).await?; - } - - println!("Audit completed successfully."); - Ok(()) -} - -/// Redeem royalties from the Network and deposit them into the wallet -/// Only works if the wallet has the private key for the royalties -async fn redeem_royalties( - royalties: Vec, - client: &Client, - root_dir: &Path, -) -> Result<()> { - if royalties.is_empty() { - println!("No royalties found to redeem."); - return Ok(()); - } else { - println!("Found {} royalties.", royalties.len()); - } - - 
let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - // batch royalties per 100 - let mut batch = Vec::new(); - for (i, royalty) in royalties.iter().enumerate() { - batch.push(royalty.clone()); - if i % 100 == 0 { - println!( - "Attempting to redeem {} royalties from the Network...", - batch.len() - ); - let transfer = Transfer::NetworkRoyalties(batch.clone()); - batch.clear(); - println!("Current balance: {}", wallet.balance()); - let cashnotes = client.receive(&transfer, &wallet).await?; - wallet.deposit_and_store_to_disk(&cashnotes)?; - println!("Successfully redeemed royalties from the Network."); - println!("Current balance: {}", wallet.balance()); - } - } - Ok(()) -} - -/// Verify a spend's existance on the Network. -/// If genesis is true, verify all the way to Genesis, note that this might take A VERY LONG TIME -pub async fn verify_spend_at( - spend_address: String, - genesis: bool, - client: &Client, - root_dir: &Path, -) -> Result<()> { - // get spend - println!("Verifying spend's existance at: {spend_address}"); - let addr = SpendAddress::from_str(&spend_address)?; - let spend = match client.get_spend_from_network(addr).await { - Ok(s) => { - println!("Confirmed spend's existance on the Network at {addr:?}"); - s - } - Err(err) => { - bail!("Could not confirm spend's validity, be careful: {err}") - } - }; - - // stop here if we don't go all the way to Genesis - if !genesis { - return Ok(()); - } - println!("Verifying spend all the way to Genesis, note that this might take a while..."); - - // extend DAG until spend - let dag_path = root_dir.join(SPEND_DAG_FILENAME); - let mut dag = match SpendDag::load_from_file(&dag_path) { - Ok(d) => { - println!("Found a local spend dag on disk, continuing from it, this might make things faster..."); - d - } - Err(err) => { - info!("Starting verification from an empty DAG as failed to load spend dag from disk: {err}"); - let genesis_addr = 
SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - SpendDag::new(genesis_addr) - } - }; - info!("Extending DAG with {spend_address} {addr:?}"); - client.spend_dag_extend_until(&mut dag, addr, spend).await?; - info!("Saving DAG locally at: {dag_path:?}"); - dag.dump_to_file(dag_path)?; - - // verify spend is not faulty - let faults = dag.get_spend_faults(&addr); - if faults.is_empty() { - println!( - "Successfully confirmed spend at {spend_address} is valid, and comes from Genesis!" - ); - } else { - println!("Spend at {spend_address} has {} faults", faults.len()); - println!("{faults:#?}"); - } - - Ok(()) -} diff --git a/sn_cli/src/bin/subcommands/wallet/helpers.rs b/sn_cli/src/bin/subcommands/wallet/helpers.rs deleted file mode 100644 index e3ef2d6687..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/helpers.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[cfg(feature = "distribution")] -use base64::Engine; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::Transfer; -use sn_client::Client; -use std::path::Path; -use url::Url; - -#[cfg(feature = "distribution")] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - address: Option, - signature: Option, -) -> Result<()> { - if address.is_some() ^ signature.is_some() { - println!("Address and signature must both be specified."); - return Ok(()); - } - if address.is_none() && signature.is_none() { - get_faucet_fixed_amount(root_dir, client, url).await?; - } else if let Some(addr) = address { - if let Some(sig) = signature { - get_faucet_distribution(root_dir, client, url, addr, sig).await?; - } - } - Ok(()) -} - -#[cfg(not(feature = "distribution"))] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - _address: Option, - _signature: Option, -) -> Result<()> { - get_faucet_fixed_amount(root_dir, client, url).await -} - -pub async fn get_faucet_fixed_amount(root_dir: &Path, client: &Client, url: String) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let address_hex = wallet.address().to_hex(); - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - let req_url = Url::parse(&format!("{url}/{address_hex}"))?; - println!("Requesting token for wallet address: {address_hex}"); - - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let body = response.text().await?; - if is_ok { - receive(body, false, client, root_dir).await?; - println!("Successfully got tokens from faucet."); - } else { - println!("Failed to get tokens from faucet, server responded with: {body:?}"); - } - Ok(()) -} - -#[cfg(feature = "distribution")] -pub async fn get_faucet_distribution( - root_dir: &Path, - client: &Client, - url: String, - 
address: String, - signature: String, -) -> Result<()> { - // submit the details to the faucet to get the distribution - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - // receive to the current local wallet - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)? - .address() - .to_hex(); - println!("Requesting distribution for maid address {address} to local wallet {wallet}"); - // base64 uses + and / as the delimiters which doesn't go well in the query - // string, so the signature is encoded using url safe characters. - let sig_bytes = base64::engine::general_purpose::STANDARD.decode(signature)?; - let sig_url = base64::engine::general_purpose::URL_SAFE.encode(sig_bytes); - let req_url = Url::parse(&format!( - "{url}/distribution?address={address}&wallet={wallet}&signature={sig_url}" - ))?; - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let transfer_hex = response.text().await?; - if !is_ok { - println!( - "Failed to get distribution from faucet, server responded with:\n{transfer_hex:?}" - ); - return Ok(()); - } - println!("Receiving transfer for maid address {address}:\n{transfer_hex}"); - receive(transfer_hex, false, client, root_dir).await?; - Ok(()) -} - -pub async fn receive( - transfer: String, - is_file: bool, - client: &Client, - root_dir: &Path, -) -> Result<()> { - let transfer = if is_file { - std::fs::read_to_string(transfer)?.trim().to_string() - } else { - transfer - }; - - let transfer = match Transfer::from_hex(&transfer) { - Ok(transfer) => transfer, - Err(err) => { - println!("Failed to parse transfer: {err:?}"); - println!("Transfer: \"{transfer}\""); - return Err(err.into()); - } - }; - println!("Successfully parsed transfer. 
"); - - println!("Verifying transfer with the Network..."); - let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let cashnotes = match client.receive(&transfer, &wallet).await { - Ok(cashnotes) => cashnotes, - Err(err) => { - println!("Failed to verify and redeem transfer: {err:?}"); - return Err(err.into()); - } - }; - println!("Successfully verified transfer."); - - let old_balance = wallet.balance(); - wallet.deposit_and_store_to_disk(&cashnotes)?; - let new_balance = wallet.balance(); - - println!("Successfully stored cash_note to wallet dir."); - println!("Old balance: {old_balance}"); - println!("New balance: {new_balance}"); - - Ok(()) -} diff --git a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs b/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs deleted file mode 100644 index 6b209a9625..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{ - audit::{audit, verify_spend_at}, - helpers::{get_faucet, receive}, - WalletApiHelper, -}; -use crate::{get_stdin_password_response, get_stdin_response}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{eyre::eyre, Result}; -use dialoguer::Confirm; -use sn_cli::utils::is_valid_key_hex; -use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; -use sn_client::transfers::{ - HotWallet, MainPubkey, MainSecretKey, NanoTokens, Transfer, TransferError, UnsignedTransaction, - WalletError, -}; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error as ClientError, -}; -use std::{path::Path, str::FromStr}; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser, Debug)] -pub enum WalletCmds { - /// Print the wallet address. - Address, - /// Print the wallet balance. - Balance { - /// Instead of checking CLI local wallet balance, the PeerId of a node can be used - /// to check the balance of its rewards local wallet. Multiple ids can be provided - /// in order to read the balance of multiple nodes at once. - #[clap(long)] - peer_id: Vec, - }, - /// Create a hot wallet. - Create { - /// Optional flag to not replace existing wallet. - #[clap(long, action)] - no_replace: bool, - /// Optional flag to not add a password. - #[clap(long, action)] - no_password: bool, - /// Optional hex-encoded main secret key. - #[clap(long, short, name = "key")] - key: Option, - /// Optional derivation passphrase to protect the mnemonic, - /// it's not the source of the entropy for the mnemonic generation. - /// The mnemonic+passphrase will be the seed. See detail at - /// `` - #[clap(long, short, name = "derivation")] - derivation_passphrase: Option, - /// Optional password to encrypt the wallet with. - #[clap(long, short)] - password: Option, - }, - /// Get tokens from a faucet. 
- GetFaucet { - /// The http url of the faucet to get tokens from. - #[clap(name = "url")] - url: String, - /// The maidsafecoin address to claim. Leave blank to receive a fixed - /// amount of tokens. - maid_address: Option, - /// A signature of the safe wallet address, made by the maidsafecoin - /// address. - signature: Option, - }, - /// Send a transfer. - /// - /// This command will create a new transfer and encrypt it for the recipient. - /// This encrypted transfer can then be shared with the recipient, who can then - /// use the 'receive' command to claim the funds. - Send { - /// The number of SafeNetworkTokens to send. - #[clap(name = "amount")] - amount: String, - /// Hex-encoded public address of the recipient. - #[clap(name = "to")] - to: String, - }, - /// Signs a transaction to be then broadcasted to the network. - Sign { - /// Hex-encoded unsigned transaction. It requires a hot-wallet was created for CLI. - #[clap(name = "tx")] - tx: String, - /// Avoid prompts by assuming `yes` as the answer. - #[clap(long, name = "force", default_value = "false")] - force: bool, - }, - /// Receive a transfer created by the 'send' or 'broadcast' command. - Receive { - /// Read the encrypted transfer from a file. - #[clap(long, default_value = "false")] - file: bool, - /// Encrypted transfer. - #[clap(name = "transfer")] - transfer: String, - }, - /// Verify a spend on the Network. 
- Verify { - /// The Network address or hex encoded UniquePubkey of the Spend to verify - #[clap(name = "spend")] - spend_address: String, - /// Verify all the way to Genesis - /// - /// Used for auditing, note that this might take a very long time - /// Analogous to verifying an UTXO through the entire blockchain in Bitcoin - #[clap(long, default_value = "false")] - genesis: bool, - }, - /// Audit the Currency - /// Note that this might take a very long time - /// Analogous to verifying the entire blockchain in Bitcoin - /// - /// When run without any flags, runs in verbose mode, - /// a slower but more informative mode where DAG collection progress is diplayed - Audit { - /// EXPERIMENTAL Dump Audit DAG in dot format on stdout - #[clap(long, default_value = "false")] - dot: bool, - /// EXPERIMENTAL redeem all royalties - #[clap(long, default_value = "false")] - royalties: bool, - /// Hex string of the Foundation SK. - /// Providing this key allow displaying rewards statistics gathered from the DAG. - #[clap(long, name = "sk_str")] - sk_str: Option, - }, - Status, - /// Encrypt wallet with a password. - Encrypt, -} - -pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Path) -> Result<()> { - match cmds { - WalletCmds::Address => { - let wallet = WalletApiHelper::load_from(root_dir)?; - match wallet { - WalletApiHelper::WatchOnlyWallet(w) => println!("{:?}", w.address()), - WalletApiHelper::HotWallet(w) => println!("{:?}", w.address()), - } - Ok(()) - } - WalletCmds::Balance { peer_id } => { - if peer_id.is_empty() { - let wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - } else { - let default_node_dir_path = dirs_next::data_dir() - .ok_or_else(|| eyre!("Failed to obtain data directory path"))? 
- .join("safe") - .join("node"); - - for id in peer_id { - let path = default_node_dir_path.join(id); - let rewards = WalletApiHelper::load_from(&path)?.balance(); - println!("Node's rewards wallet balance (PeerId: {id}): {rewards}"); - } - } - Ok(()) - } - WalletCmds::Create { - no_replace, - no_password, - key, - derivation_passphrase, - password, - } => { - let mut wallet_already_exists = false; - if key.is_some() && derivation_passphrase.is_some() { - return Err(eyre!( - "Only one of `--key` or `--derivation` may be specified" - )); - } - if *no_password && password.is_some() { - return Err(eyre!( - "Only one of `--no-password` or `--password` may be specified" - )); - } - if let Some(key) = key { - // Check if key is valid - // Doing this early to avoid stashing an existing wallet while the provided key is invalid - if !is_valid_key_hex(key) { - return Err(eyre!("Please provide a valid secret key in hex format. It must be 64 characters long.")); - } - } - // Check for existing wallet - if HotWallet::is_encrypted(root_dir) { - wallet_already_exists = true; - println!("Existing encrypted wallet found."); - } else if let Ok(existing_wallet) = WalletApiHelper::load_from(root_dir) { - wallet_already_exists = true; - let balance = existing_wallet.balance(); - println!("Existing wallet found with balance of {balance}"); - } - // If a wallet already exists, ask the user if they want to replace it - if wallet_already_exists { - let response = if *no_replace { - "n".to_string() - } else { - get_stdin_response("Replace existing wallet with new wallet? 
[y/N]") - }; - if response != "y" { - // Do nothing, return ok and prevent any further operations - println!("Exiting without creating new wallet"); - return Ok(()); - } - // remove existing wallet - let new_location = HotWallet::stash(root_dir)?; - println!("Old wallet stored at {}", new_location.display()); - } - let main_sk = if let Some(key) = key { - let sk = SecretKey::from_hex(key) - .map_err(|err| eyre!("Failed to parse hex-encoded SK: {err:?}"))?; - MainSecretKey::new(sk) - } else { - // If no key is specified, use the mnemonic - let mnemonic = load_or_create_mnemonic(root_dir)?; - secret_key_from_mnemonic(mnemonic, derivation_passphrase.to_owned())? - }; - // Ask user if they want to encrypt the wallet with a password - let password = if *no_password { - None - } else if let Some(password) = password { - Some(password.to_owned()) - } else { - request_password(false) - }; - // Create the new wallet with the new key - let main_pubkey = main_sk.main_pubkey(); - let local_wallet = HotWallet::create_from_key(root_dir, main_sk, password)?; - let balance = local_wallet.balance(); - println!( - "Hot Wallet created (balance {balance}) for main public key: {main_pubkey:?}." - ); - Ok(()) - } - WalletCmds::Sign { tx, force } => sign_transaction(tx, root_dir, *force), - WalletCmds::Status => { - let mut wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - wallet.status()?; - Ok(()) - } - WalletCmds::Encrypt => { - println!("Encrypt your wallet with a password. 
WARNING: If you forget your password, you will lose access to your wallet!"); - // Ask user for a new password to encrypt the wallet with - if let Some(password) = request_password(true) { - WalletApiHelper::encrypt(root_dir, &password)?; - } - println!("Wallet successfully encrypted."); - Ok(()) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wallet_cmds( - cmds: WalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WalletCmds::Send { amount, to } => send(amount, to, client, root_dir, verify_store).await, - WalletCmds::Receive { file, transfer } => receive(transfer, file, client, root_dir).await, - WalletCmds::GetFaucet { - url, - maid_address, - signature, - } => get_faucet(root_dir, client, url.clone(), maid_address, signature).await, - WalletCmds::Audit { - dot, - royalties, - sk_str, - } => { - let sk_key = if let Some(s) = sk_str { - match SecretKey::from_hex(&s) { - Ok(sk_key) => Some(sk_key), - Err(err) => { - return Err(eyre!( - "Cann't parse Foundation SK from input string: {s} {err:?}" - )) - } - } - } else { - None - }; - audit(client, dot, royalties, root_dir, sk_key).await - } - WalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -async fn send( - amount: String, - to: String, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - let from = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let amount = match NanoTokens::from_str(&amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. 
Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let cash_note = match sn_client::send(from, amount, to, client, verify_store).await { - Ok(cash_note) => { - let wallet = HotWallet::load_from(root_dir)?; - println!("Sent {amount:?} to {to:?}"); - println!("New wallet balance is {}.", wallet.balance()); - cash_note - } - Err(err) => { - match err { - ClientError::AmountIsZero => { - println!("Zero amount passed in. Nothing sent."); - } - ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - ))) => { - println!("Could not send due to low balance.\nBalance: {available:?}\nRequired: {required:?}"); - } - _ => { - println!("Failed to send {amount:?} to {to:?} due to {err:?}."); - } - } - return Err(err.into()); - } - }; - - let transfer = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?; - println!("The encrypted transfer has been successfully created."); - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the 'receive' command to claim the funds."); - - Ok(()) -} - -fn sign_transaction(tx: &str, root_dir: &Path, force: bool) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let unsigned_tx = UnsignedTransaction::from_hex(tx)?; - - println!("The unsigned transaction has been successfully decoded:"); - for (i, (unique_pk, amount)) in unsigned_tx.spent_unique_keys().iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", unique_pk.to_hex()); - println!("\tAmount: {amount}"); - - for (descendant, amount) in unsigned_tx.output_unique_keys().iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!("\n** Please make sure the above 
information is correct before signing it. **\n"); - let confirmation = Confirm::new() - .with_prompt("Do you want to sign the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction not signed."); - return Ok(()); - } - } - - println!("Signing the transaction with local hot-wallet..."); - let signed_tx = wallet.sign(unsigned_tx)?; - - println!( - "The transaction has been successfully signed:\n\n{}\n", - signed_tx.to_hex()? - ); - println!( - "Please copy the above text, and broadcast it to the network with 'wallet broadcast' cmd." - ); - - Ok(()) -} - -fn request_password(required: bool) -> Option { - 'outer: loop { - let prompt = if required { - "Enter password: " - } else { - "Enter password (leave empty for none): " - }; - - let password_response = get_stdin_password_response(prompt); - - if required && password_response.is_empty() { - println!("Password is required."); - continue 'outer; - } - - // If a password is set, request user to repeat it - if !password_response.is_empty() { - const MAX_RETRIES: u8 = 2; - let mut retries = 0u8; - - loop { - let repeat_password = get_stdin_password_response("Repeat password: "); - - if repeat_password == password_response { - break; - } else if retries >= MAX_RETRIES { - // User forgot the password, let them reset it again - println!("You might have forgotten the password. Please set a new one."); - continue 'outer; - } else { - println!("Passwords do not match."); - retries += 1; - } - } - - break Some(password_response); - } - - break None; - } -} diff --git a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs b/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs deleted file mode 100644 index c4513754ba..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{audit::verify_spend_at, watch_only_wallet_from_pk, WalletApiHelper}; - -use bls::PublicKey; -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use dialoguer::Confirm; -use sn_client::transfers::{MainPubkey, NanoTokens, SignedTransaction, Transfer, WatchOnlyWallet}; -use sn_client::Client; -use std::{path::Path, str::FromStr}; -use walkdir::WalkDir; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser, Debug)] -pub enum WatchOnlyWalletCmds { - /// Print the watch-only wallets addresses. - Addresses, - /// Print the wallet balance. - Balance { - /// The hex-encoded public key of an existing watch-only wallet. - #[clap(name = "public key")] - pk: Option, - }, - /// Deposit CashNotes from the received directory to the chosen watch-only wallet. - /// Or Read a hex encoded CashNote from stdin. - /// - /// The default received directory is platform specific: - /// - Linux: $HOME/.local/share/safe/client/\/cash_notes - /// - macOS: $HOME/Library/Application Support/safe/client/\/cash_notes - /// - Windows: C:\Users\{username}\AppData\Roaming\safe\client\\\cash_notes - /// - /// If you find the default path unwieldy, you can also set the RECEIVED_CASHNOTES_PATH environment - /// variable to a path you would prefer to work with. - #[clap(verbatim_doc_comment)] - Deposit { - /// Read a hex encoded CashNote from stdin. - #[clap(long, default_value = "false")] - stdin: bool, - /// The hex encoded CashNote. 
- #[clap(long)] - cash_note: Option, - /// The hex-encoded public key of an existing watch-only wallet to deposit into it. - #[clap(name = "public key")] - pk: String, - }, - /// Create a watch-only wallet from the given (hex-encoded) key. - Create { - /// Hex-encoded main public key. - #[clap(name = "public key")] - pk: String, - }, - /// Builds an unsigned transaction to be signed offline. It requires an existing watch-only wallet. - Transaction { - /// Hex-encoded public key of the source watch-only wallet. - #[clap(name = "from")] - from: String, - /// The number of SafeNetworkTokens to transfer. - #[clap(name = "amount")] - amount: String, - /// Hex-encoded public address of the recipient. - #[clap(name = "to")] - to: String, - }, - /// This command turns an offline signed transaction into a valid sendable Transfer - /// The signed transaction's SignedSpends are broadcasted to the Network and the recipient's Transfer is returned - /// This Transfer can then be sent and redeemed by the recipient using the 'receive' command - Broadcast { - /// Hex-encoded signed transaction. - #[clap(name = "signed Tx")] - signed_tx: String, - /// Avoid prompts by assuming `yes` as the answer. - #[clap(long, name = "force", default_value = "false")] - force: bool, - }, - /// Verify a spend on the Network. 
- Verify { - /// The Network address or hex encoded UniquePubkey of the Spend to verify - #[clap(name = "spend")] - spend_address: String, - /// Verify all the way to Genesis - /// - /// Used for auditing, note that this might take a very long time - /// Analogous to verifying an UTXO through the entire blockchain in Bitcoin - #[clap(long, default_value = "false")] - genesis: bool, - }, -} - -pub(crate) async fn wo_wallet_cmds_without_client( - cmds: &WatchOnlyWalletCmds, - root_dir: &Path, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Addresses => { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Addresses of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - for (wo_wallet, _) in wallets { - println!("- {:?}", wo_wallet.address()); - } - Ok(()) - } - WatchOnlyWalletCmds::Balance { pk } => { - if let Some(pk) = pk { - let main_pk = MainPubkey::from_hex(pk)?; - let watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - println!("{}", watch_only_wallet.balance()); - } else { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Balances of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - let mut total = NanoTokens::zero(); - for (wo_wallet, folder_name) in wallets { - let balance = wo_wallet.balance(); - println!("{folder_name}: {balance}"); - total = total - .checked_add(balance) - .ok_or(eyre!("Failed to add to total balance"))?; - } - println!("Total: {total}"); - } - Ok(()) - } - WatchOnlyWalletCmds::Deposit { - stdin, - cash_note, - pk, - } => { - let main_pk = MainPubkey::from_hex(pk)?; - let mut wallet = WalletApiHelper::watch_only_from_pk(main_pk, root_dir)?; - wallet.deposit(*stdin, cash_note.as_deref()) - } - WatchOnlyWalletCmds::Create { pk } => { - let pk = PublicKey::from_hex(pk) - .map_err(|err| eyre!("Failed to parse hex-encoded PK: {err:?}"))?; - let main_pk = MainPubkey::new(pk); - let main_pubkey = main_pk.public_key(); - let 
watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let balance = watch_only_wallet.balance(); - println!("Watch-only wallet created (balance {balance}) for main public key: {main_pubkey:?}."); - Ok(()) - } - WatchOnlyWalletCmds::Transaction { from, amount, to } => { - build_unsigned_transaction(from, amount, to, root_dir) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wo_wallet_cmds( - cmds: WatchOnlyWalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Broadcast { signed_tx, force } => { - broadcast_signed_tx(signed_tx, client, verify_store, force).await - } - WatchOnlyWalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -fn get_watch_only_wallets(root_dir: &Path) -> Result> { - let mut wallets = vec![]; - for entry in WalkDir::new(root_dir.display().to_string()) - .into_iter() - .flatten() - { - if let Some(file_name) = entry.path().file_name().and_then(|name| name.to_str()) { - if file_name.starts_with("pk_") { - let wallet_dir = root_dir.join(file_name); - if let Ok(wo_wallet) = WatchOnlyWallet::load_from_path(&wallet_dir) { - wallets.push((wo_wallet, file_name.to_string())); - } - } - } - } - if wallets.is_empty() { - bail!("No watch-only wallets found at {}", root_dir.display()); - } - - Ok(wallets) -} - -fn build_unsigned_transaction(from: &str, amount: &str, to: &str, root_dir: &Path) -> Result<()> { - let main_pk = MainPubkey::from_hex(from)?; - let mut wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let amount = match NanoTokens::from_str(amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. 
Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let unsigned_transfer = wallet.build_unsigned_transaction(vec![(amount, to)], None)?; - - println!( - "The unsigned transaction has been successfully created:\n\n{}\n", - hex::encode(rmp_serde::to_vec(&unsigned_transfer)?) - ); - println!("Please copy the above text, sign it offline with 'wallet sign' cmd, and then use the signed transaction to broadcast it with 'wallet broadcast' cmd."); - - Ok(()) -} - -async fn broadcast_signed_tx( - signed_tx: String, - client: &Client, - verify_store: bool, - force: bool, -) -> Result<()> { - let signed_tx = match SignedTransaction::from_hex(&signed_tx) { - Ok(signed_tx) => signed_tx, - Err(err) => { - bail!("Failed to decode the signed transaction: {err:?}"); - } - }; - println!("The signed transaction has been successfully decoded:"); - - for (i, signed_spend) in signed_tx.spends.iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", signed_spend.unique_pubkey().to_hex()); - println!("\tAmount: {}", signed_spend.amount()); - - if let Err(err) = signed_spend.verify() { - bail!("Transaction is invalid: {err:?}"); - } - - for (descendant, amount) in signed_spend.spend.descendants.iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!( - "\n** Please make sure the above information is correct before broadcasting it. 
**\n" - ); - let confirmation = Confirm::new() - .with_prompt("Do you want to broadcast the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction was not broadcasted."); - return Ok(()); - } - } - - println!("Broadcasting the transaction to the network..."); - // return the first CashNote (assuming there is only one because we only sent to one recipient) - let cash_note = match &signed_tx.output_cashnotes[..] { - [cashnote] => cashnote, - [_multiple, ..] => bail!("Multiple CashNotes were returned from the transaction when only one was expected. This is a BUG."), - [] =>bail!("No CashNotes were built from the Tx.") - }; - - // send to network - client - .send_spends(signed_tx.spends.iter(), verify_store) - .await - .map_err(|err| { - eyre!("The transfer was not successfully registered in the network: {err:?}") - })?; - - println!("Transaction broadcasted!."); - - let transfer = Transfer::transfer_from_cash_note(cash_note)?.to_hex()?; - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the wallet 'receive' command to claim the funds.\n"); - - if let Some(change_cn) = signed_tx.change_cashnote { - let change_transfer = Transfer::transfer_from_cash_note(&change_cn)?.to_hex()?; - println!("Please redeem the change from this Transaction:\n\n{change_transfer}\n"); - println!("You should use the wallet 'deposit' command to be able to use these funds.\n"); - } - - Ok(()) -} diff --git a/sn_cli/src/files/chunk_manager.rs b/sn_cli/src/files/chunk_manager.rs deleted file mode 100644 index 577ff0e111..0000000000 --- a/sn_cli/src/files/chunk_manager.rs +++ /dev/null @@ -1,1045 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::get_progress_bar; -use super::upload::UploadedFile; -use bytes::Bytes; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress}, - FilesApi, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - ffi::OsString, - fs::{self, File}, - io::Write, - path::{Path, PathBuf}, - time::Instant, -}; -use tracing::{debug, error, info, trace}; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -const CHUNK_ARTIFACTS_DIR: &str = "chunk_artifacts"; -const METADATA_FILE: &str = "metadata"; - -// The unique hex encoded hash(path) -// This allows us to uniquely identify if a file has been chunked or not. 
-// An alternative to use instead of filename as it might not be unique -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -struct PathXorName(String); - -impl PathXorName { - fn new(path: &Path) -> PathXorName { - // we just need an unique value per path, thus we don't have to mind between the - // [u8]/[u16] differences - let path_as_lossy_str = path.as_os_str().to_string_lossy(); - let path_xor = XorName::from_content(path_as_lossy_str.as_bytes()); - PathXorName(hex::encode(path_xor)) - } -} - -/// Info about a file that has been chunked -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -pub struct ChunkedFile { - pub file_path: PathBuf, - pub file_name: OsString, - pub head_chunk_address: ChunkAddress, - pub chunks: BTreeSet<(XorName, PathBuf)>, - pub data_map: Chunk, -} - -/// Manages the chunking process by resuming pre-chunked files and chunking any -/// file that has not been chunked yet. -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -pub struct ChunkManager { - /// Whole client root dir - root_dir: PathBuf, - /// Dir for chunk artifacts - artifacts_dir: PathBuf, - files_to_chunk: Vec<(OsString, PathXorName, PathBuf)>, - chunks: BTreeMap, - completed_files: Vec<(PathBuf, OsString, ChunkAddress)>, - resumed_chunk_count: usize, - resumed_files_count: usize, -} - -impl ChunkManager { - // Provide the root_dir. 
The function creates a sub-directory to store the SE chunks - pub fn new(root_dir: &Path) -> Self { - let artifacts_dir = root_dir.join(CHUNK_ARTIFACTS_DIR); - Self { - root_dir: root_dir.to_path_buf(), - artifacts_dir, - files_to_chunk: Default::default(), - chunks: Default::default(), - completed_files: Default::default(), - resumed_files_count: 0, - resumed_chunk_count: 0, - } - } - - /// Chunk all the files in the provided `files_path` - /// These are stored to the CHUNK_ARTIFACTS_DIR - /// if read_cache is true, will take cache from previous runs into account - /// - /// # Arguments - /// * files_path - &[Path] - /// * read_cache - Boolean. Set to true to resume the chunks from the artifacts dir. - /// * include_data_maps - Boolean. If set to true, will append all the ChunkedFile.data_map chunks - pub fn chunk_path( - &mut self, - files_path: &Path, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - self.chunk_with_iter( - WalkDir::new(files_path).into_iter().flatten(), - read_cache, - include_data_maps, - ) - } - - /// Return the filename and the file's Xor address if all their chunks has been marked as - /// verified - pub(crate) fn already_put_chunks( - &mut self, - entries_iter: impl Iterator, - make_files_public: bool, - ) -> Result> { - self.chunk_with_iter(entries_iter, false, make_files_public)?; - Ok(self.get_chunks()) - } - - /// Chunk all the files in the provided iterator - /// These are stored to the CHUNK_ARTIFACTS_DIR - /// if read_cache is true, will take cache from previous runs into account - pub fn chunk_with_iter( - &mut self, - entries_iter: impl Iterator, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - let now = Instant::now(); - // clean up - self.files_to_chunk = Default::default(); - self.chunks = Default::default(); - self.completed_files = Default::default(); - self.resumed_chunk_count = 0; - self.resumed_files_count = 0; - - // collect the files to chunk - entries_iter.for_each(|entry| { - if 
entry.file_type().is_file() { - let path_xor = PathXorName::new(entry.path()); - info!( - "Added file {:?} with path_xor: {path_xor:?} to be chunked/resumed", - entry.path() - ); - self.files_to_chunk.push(( - entry.file_name().to_owned(), - path_xor, - entry.into_path(), - )); - } - }); - let total_files = self.files_to_chunk.len(); - - if total_files == 0 { - return Ok(()); - }; - - // resume the chunks from the artifacts dir - if read_cache { - self.resume_path(); - } - - // note the number of chunks that we've resumed - self.resumed_chunk_count = self - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - // note the number of files that we've resumed - self.resumed_files_count = self.chunks.keys().collect::>().len(); - - // Filter out files_to_chunk; Any PathXorName in chunks_to_upload is considered to be resumed. - { - let path_xors = self.chunks.keys().collect::>(); - self.files_to_chunk - .retain(|(_, path_xor, _)| !path_xors.contains(path_xor)); - } - - // Get the list of completed files - { - let completed_files = self.chunks.iter().filter_map(|(_, chunked_file)| { - if chunked_file.chunks.is_empty() { - Some(( - chunked_file.file_path.clone(), - chunked_file.file_name.clone(), - chunked_file.head_chunk_address, - )) - } else { - None - } - }); - - self.completed_files.extend(completed_files); - } - - // Return early if no more files to chunk - if self.files_to_chunk.is_empty() { - debug!( - "All files_to_chunk ({total_files:?}) were resumed. 
Returning the resumed chunks.", - ); - debug!("It took {:?} to resume all the files", now.elapsed()); - return Ok(()); - } - - let progress_bar = get_progress_bar(total_files as u64)?; - progress_bar.println(format!("Chunking {total_files} files...")); - - let artifacts_dir = &self.artifacts_dir.clone(); - let chunked_files = self.files_to_chunk - .par_iter() - .map(|(original_file_name, path_xor, path)| { - let file_chunks_dir = { - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - fs::create_dir_all(&file_chunks_dir).map_err(|err| { - error!("Failed to create folder {file_chunks_dir:?} for SE chunks with error {err:?}!"); - eyre!("Failed to create dir {file_chunks_dir:?} for SE chunks with error {err:?}") - })?; - file_chunks_dir - }; - - match FilesApi::chunk_file(path, &file_chunks_dir, include_data_maps) { - Ok((head_chunk_address, data_map, size, chunks)) => { - progress_bar.clone().inc(1); - debug!("Chunked {original_file_name:?} with {path_xor:?} into file's XorName: {head_chunk_address:?} of size {size}, and chunks len: {}", chunks.len()); - - let chunked_file = ChunkedFile { - head_chunk_address, - file_path: path.to_owned(), - file_name: original_file_name.clone(), - chunks: chunks.into_iter().collect(), - data_map - }; - Ok((path_xor.clone(), chunked_file)) - } - Err(err) => { - println!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - error!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - Err(eyre!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}")) - } - } - }) - .collect::>>()?; - debug!( - "Out of total files_to_chunk {total_files}, we have resumed {} files and chunked {} files", - self.resumed_files_count, - chunked_files.len() - ); - - // Self::resume_path would create an empty self.chunks entry if a file that was fully - // completed was resumed. Thus if it is empty, the user did not provide any valid file - // path. 
- if chunked_files.is_empty() && self.chunks.is_empty() { - bail!( - "The provided path does not contain any file. Please check your path!\nExiting..." - ); - } - - // write metadata and data_map - chunked_files - .par_iter() - .map(|(path_xor, chunked_file)| { - let metadata_path = artifacts_dir.join(&path_xor.0).join(METADATA_FILE); - - info!("Metadata path is: {metadata_path:?}"); - let metadata = rmp_serde::to_vec(&( - chunked_file.head_chunk_address, - chunked_file.data_map.clone(), - )) - .map_err(|_| { - error!("Failed to serialize file_xor_addr for writing metadata"); - eyre!("Failed to serialize file_xor_addr for writing metadata") - })?; - - let mut metadata_file = File::create(&metadata_path).map_err(|_| { - error!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}") - })?; - - metadata_file.write_all(&metadata).map_err(|_| { - error!("Failed to write metadata to {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to write metadata to {metadata_path:?} for {path_xor:?}") - })?; - - debug!("Wrote metadata for {path_xor:?}"); - Ok(()) - }) - .collect::>()?; - - progress_bar.finish_and_clear(); - debug!("It took {:?} to chunk {} files", now.elapsed(), total_files); - self.chunks.extend(chunked_files); - - Ok(()) - } - - // Try to resume the chunks - fn resume_path(&mut self) { - let artifacts_dir = self.artifacts_dir.clone(); - let resumed = self - .files_to_chunk - .par_iter() - .filter_map(|(original_file_name, path_xor, original_file_path)| { - // if this folder exists, and if we find chunks under this, we upload them. - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - if !file_chunks_dir.exists() { - return None; - } - Self::read_file_chunks_dir( - file_chunks_dir, - path_xor, - original_file_path.clone(), - original_file_name.clone(), - ) - }) - .collect::>(); - - self.chunks.extend(resumed); - } - - /// Get all the chunk name and their path. 
- /// If include_data_maps is true, append all the ChunkedFile.data_map chunks to the vec - pub fn get_chunks(&self) -> Vec<(XorName, PathBuf)> { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .cloned() - .collect::>() - } - - pub fn is_chunks_empty(&self) -> bool { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .next() - .is_none() - } - - /// Mark all the chunks as completed. This removes the chunks from the CHUNK_ARTIFACTS_DIR. - /// But keeps the folder and metadata file that denotes that the file has been already completed. - pub fn mark_completed_all(&mut self) -> Result<()> { - let all_chunks = self - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .map(|(chunk, _)| *chunk) - .collect::>(); - self.mark_completed(all_chunks.into_iter()) - } - - /// Mark a set of chunks as completed and remove them from CHUNK_ARTIFACTS_DIR - /// If the entire file is completed, keep the folder and metadata file - pub fn mark_completed(&mut self, chunks: impl Iterator) -> Result<()> { - let set_of_completed_chunks = chunks.collect::>(); - trace!("marking as completed: {set_of_completed_chunks:?}"); - - // remove those files - self.chunks - .par_iter() - .flat_map(|(_, chunked_file)| &chunked_file.chunks) - .map(|(chunk_xor, chunk_path)| { - if set_of_completed_chunks.contains(chunk_xor) { - debug!("removing {chunk_xor:?} at {chunk_path:?} as it is marked as completed"); - fs::remove_file(chunk_path).map_err(|_err| { - error!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}"); - eyre!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}") - })?; - } - Ok(()) - }) - .collect::>()?; - - let mut entire_file_is_done = BTreeSet::new(); - // remove the entries from the struct - self.chunks.iter_mut().for_each(|(path_xor, chunked_file)| { - chunked_file - .chunks - // if chunk is part of completed_chunks, return false to remove it - .retain(|(chunk_xor, _)| 
!set_of_completed_chunks.contains(chunk_xor)); - if chunked_file.chunks.is_empty() { - entire_file_is_done.insert(path_xor.clone()); - } - }); - - for path_xor in &entire_file_is_done { - // todo: should we remove the entry? ig so - if let Some(chunked_file) = self.chunks.remove(path_xor) { - trace!("removed {path_xor:?} from chunks list"); - - self.completed_files.push(( - chunked_file.file_path.clone(), - chunked_file.file_name.clone(), - chunked_file.head_chunk_address, - )); - - let uploaded_file_metadata = UploadedFile { - filename: chunked_file.file_name, - data_map: Some(chunked_file.data_map.value), - }; - // errors are logged by write() - let _result = - uploaded_file_metadata.write(&self.root_dir, &chunked_file.head_chunk_address); - } - } - Ok(()) - - // let mut entire_file_is_done = BTreeSet::new(); - // // remove the entries from the struct - // self.chunks.iter_mut().for_each(|(path_xor, chunked_file)| { - // chunked_file - // .chunks - // // if chunk is part of completed_chunks, return false to remove it - // .retain(|(chunk_xor, _)| !set_of_completed_chunks.contains(chunk_xor)); - // if chunked_file.chunks.is_empty() { - // entire_file_is_done.insert(path_xor.clone()); - // } - // }); - - // for path_xor in &entire_file_is_done { - // // todo: should we remove the entry? ig so - // if let Some(chunked_file) = self.chunks.remove(path_xor) { - // trace!("removed {path_xor:?} from chunks list"); - // self.verified_files - // .push((chunked_file.file_name, chunked_file.head_chunk_address)); - // } - // } - } - - /// Return the filename and the file's Xor address if all their chunks has been marked as - /// completed - pub(crate) fn completed_files(&self) -> &Vec<(PathBuf, OsString, ChunkAddress)> { - &self.completed_files - } - - /// Return the list of Filenames that have some chunks that are yet to be marked as completed. 
- pub(crate) fn incomplete_files(&self) -> Vec<(&PathBuf, &OsString, &ChunkAddress)> { - self.chunks - .values() - .map(|chunked_file| { - ( - &chunked_file.file_path, - &chunked_file.file_name, - &chunked_file.head_chunk_address, - ) - }) - .collect() - } - - /// Returns an iterator over the list of chunked files - pub(crate) fn iter_chunked_files(&mut self) -> impl Iterator { - self.chunks.values() - } - - // Try to read the chunks from `file_chunks_dir` - // Returns the ChunkedFile if the metadata file exists - // file_chunks_dir: artifacts_dir/path_xor - // path_xor: Used during logging and is returned - // original_file_name: Used to create ChunkedFile - fn read_file_chunks_dir( - file_chunks_dir: PathBuf, - path_xor: &PathXorName, - original_file_path: PathBuf, - original_file_name: OsString, - ) -> Option<(PathXorName, ChunkedFile)> { - let mut file_chunk_address: Option = None; - let mut data_map = Chunk::new(Bytes::new()); - debug!("Trying to resume {path_xor:?} as the file_chunks_dir exists"); - - let chunks = WalkDir::new(file_chunks_dir.clone()) - .into_iter() - .flatten() - .filter_map(|entry| { - if !entry.file_type().is_file() { - return None; - } - if entry.file_name() == METADATA_FILE { - if let Some((address, optional_data_map)) = - Self::try_read_metadata(entry.path()) - { - file_chunk_address = Some(address); - data_map = optional_data_map; - debug!("Obtained metadata for {path_xor:?}"); - } else { - error!("Could not read metadata for {path_xor:?}"); - } - // not a chunk, so don't return - return None; - } - - // try to get the chunk's xorname from its filename - if let Some(file_name) = entry.file_name().to_str() { - Self::hex_decode_xorname(file_name) - .map(|chunk_xorname| (chunk_xorname, entry.into_path())) - } else { - error!( - "Failed to convert OsString to str for {:?}", - entry.file_name() - ); - None - } - }) - .collect::>(); - - match file_chunk_address { - Some(head_chunk_address) => { - debug!("Resuming {} chunks for file 
{original_file_name:?} and with file_xor_addr {head_chunk_address:?}/{path_xor:?}", chunks.len()); - - Some(( - path_xor.clone(), - ChunkedFile { - file_path: original_file_path, - file_name: original_file_name, - head_chunk_address, - chunks, - data_map, - }, - )) - } - _ => { - error!("Metadata file or data map was not present for {path_xor:?}"); - // metadata file or data map was not present/was not read - None - } - } - } - - /// Try to read the metadata file - /// Returning (head_chunk_address, datamap Chunk) - fn try_read_metadata(path: &Path) -> Option<(ChunkAddress, Chunk)> { - let metadata = fs::read(path) - .map_err(|err| error!("Failed to read metadata with err {err:?}")) - .ok()?; - // head chunk address and the final datamap contents if a datamap exists for this file - let metadata: (ChunkAddress, Chunk) = rmp_serde::from_slice(&metadata) - .map_err(|err| error!("Failed to deserialize metadata with err {err:?}")) - .ok()?; - - Some(metadata) - } - - // Decode the hex encoded xorname - fn hex_decode_xorname(string: &str) -> Option { - let hex_decoded = hex::decode(string) - .map_err(|err| error!("Failed to decode {string} into bytes with err {err:?}")) - .ok()?; - let decoded_xorname: [u8; xor_name::XOR_NAME_LEN] = hex_decoded - .try_into() - .map_err(|_| error!("Failed to convert hex_decoded xorname into an [u8; 32]")) - .ok()?; - Some(XorName(decoded_xorname)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use color_eyre::{eyre::eyre, Result}; - use rand::{thread_rng, Rng}; - use rayon::prelude::IntoParallelIterator; - use sn_logging::LogBuilder; - use tempfile::TempDir; - - /// Assert any collection/iterator even if their orders do not match. 
- pub fn assert_list_eq(a: I, b: J) - where - K: Eq + Clone, - I: IntoIterator, - J: IntoIterator, - { - let vec1: Vec<_> = a.into_iter().collect::>(); - let mut vec2: Vec<_> = b.into_iter().collect(); - - assert_eq!(vec1.len(), vec2.len()); - - for item1 in &vec1 { - let idx2 = vec2 - .iter() - .position(|item2| item1 == item2) - .expect("Item not found in second list"); - - vec2.swap_remove(idx2); - } - - assert_eq!(vec2.len(), 0); - } - - #[test] - fn chunked_files_should_be_written_to_artifacts_dir() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - let artifacts_dir = manager.artifacts_dir.clone(); - let _ = create_random_files(&random_files_dir, 1, 1)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let chunks = manager.get_chunks(); - // 1. 1mb file produces 4 chunks - assert_eq!(chunks.len(), 4); - - // 2. make sure we have 1 folder == 1 file - let n_folders = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir) - .count(); - assert_eq!(n_folders, 1); - - // 3. make sure we have the 1 files per chunk, + 1 datamap + 1 metadata file - let n_files = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| { - info!("direntry {entry:?}"); - entry.file_type().is_file() - }) - .count(); - assert_eq!(n_files, chunks.len() + 1); - - // 4. 
make sure metadata file holds the correct file_xor_addr - let mut file_xor_addr_from_metadata = None; - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - if entry.file_type().is_file() && entry.file_name() == METADATA_FILE { - let metadata = ChunkManager::try_read_metadata(entry.path()); - - if let Some((head_chunk_addr, _datamap)) = metadata { - file_xor_addr_from_metadata = Some(head_chunk_addr); - } - } - } - let file_xor_addr_from_metadata = - file_xor_addr_from_metadata.expect("The metadata file should be present"); - let file_xor_addr = manager - .chunks - .values() - .next() - .expect("1 file should be present") - .head_chunk_address; - assert_eq!(file_xor_addr_from_metadata, file_xor_addr); - - // 5. make sure the chunked file's name is the XorName of that chunk - let chunk_xornames = manager - .chunks - .values() - .next() - .expect("We must have 1 file here") - .chunks - .iter() - .map(|(xor_name, _)| *xor_name) - .collect::>(); - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - assert!(chunk_xornames.contains(&chunk_xorname_from_filename)); - } - } - - Ok(()) - } - - #[test] - fn no_datamap_chunked_files_should_be_written_to_artifacts_dir_when_not_public() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - let artifacts_dir = manager.artifacts_dir.clone(); - let _ = create_random_files(&random_files_dir, 1, 1)?; - - // we do NOT want to include or write the data_map chunk here - manager.chunk_path(&random_files_dir, true, false)?; - - let chunks = manager.get_chunks(); - // 1. 
1mb file produces 3 chunks without the datamap - assert_eq!(chunks.len(), 3); - - // 2. make sure we have 1 folder == 1 file - let n_folders = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir) - .count(); - assert_eq!(n_folders, 1); - - // 3. make sure we have the 1 files per chunk, + 1 metadata file - let n_files = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| { - info!("direntry {entry:?}"); - entry.file_type().is_file() - }) - .count(); - assert_eq!(n_files, chunks.len() + 1); - - // 4. make sure metadata file holds the correct file_xor_addr - let mut file_xor_addr_from_metadata = None; - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - if entry.file_type().is_file() && entry.file_name() == METADATA_FILE { - let metadata = ChunkManager::try_read_metadata(entry.path()); - - if let Some((head_chunk_addr, _datamap)) = metadata { - file_xor_addr_from_metadata = Some(head_chunk_addr); - } - } - } - let file_xor_addr_from_metadata = - file_xor_addr_from_metadata.expect("The metadata file should be present"); - let file_xor_addr = manager - .chunks - .values() - .next() - .expect("1 file should be present") - .head_chunk_address; - assert_eq!(file_xor_addr_from_metadata, file_xor_addr); - - // 5. 
make sure the chunked file's name is the XorName of that chunk - let chunk_xornames = manager - .chunks - .values() - .next() - .expect("We must have 1 file here") - .chunks - .iter() - .map(|(xor_name, _)| *xor_name) - .collect::>(); - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - assert!(chunk_xornames.contains(&chunk_xorname_from_filename)); - } - } - - Ok(()) - } - - #[test] - fn chunks_should_be_removed_from_artifacts_dir_if_marked_as_completed() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 1, 1)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let path_xor = manager.chunks.keys().next().unwrap().clone(); - let chunked_file = manager.chunks.values().next().unwrap().clone(); - let file_xor_addr = chunked_file.head_chunk_address; - let (chunk, _) = chunked_file - .chunks - .first() - .expect("Must contain 1 chunk") - .clone(); - let total_chunks = manager.chunks.values().next().unwrap().chunks.len(); - manager.mark_completed(vec![chunk].into_iter())?; - - // 1. chunk should be removed from the struct - assert_eq!( - manager - .chunks - .values() - .next() - .expect("Since the file was not fully completed, it should be present") - .chunks - .len(), - total_chunks - 1, - ); - - // 2. 
the folder should exists, but chunk removed - let file_chunks_dir = manager.artifacts_dir.join(&path_xor.0); - let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir( - file_chunks_dir, - &path_xor, - chunked_file.file_path, - chunked_file.file_name, - ) - .expect("Folder and metadata should be present"); - assert_eq!(chunked_file_from_dir.chunks.len(), total_chunks - 1); - assert_eq!(chunked_file_from_dir.head_chunk_address, file_xor_addr); - assert_eq!(path_xor_from_dir, path_xor); - - // 2. file should not be marked as completed - assert!(manager.completed_files.is_empty()); - - Ok(()) - } - - #[test] - fn marking_all_chunks_as_completed_should_not_remove_the_dir() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - // cloned after chunking - let manager_clone = manager.clone(); - - let n_folders = WalkDir::new(&manager.artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != manager.artifacts_dir) - .count(); - assert_eq!(n_folders, 5); - - manager.mark_completed_all()?; - - // all 5 files should be marked as completed - assert_eq!(manager.completed_files.len(), 5); - - // all 5 folders should exist - for (path_xor, chunked_file) in manager_clone.chunks.iter() { - let file_chunks_dir = manager_clone.artifacts_dir.join(path_xor.0.clone()); - let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir( - file_chunks_dir, - path_xor, - chunked_file.file_path.clone(), - chunked_file.file_name.to_owned(), - ) - .expect("Folder and metadata should be present"); - assert_eq!(chunked_file_from_dir.chunks.len(), 0); - assert_eq!( - chunked_file_from_dir.head_chunk_address, - chunked_file.head_chunk_address - ); - 
assert_eq!(&path_xor_from_dir, path_xor); - } - - Ok(()) - } - - #[test] - fn mark_none_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. make sure the chunk counts match - let total_chunk_count = manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - assert_eq!(manager.resumed_chunk_count, 0); - assert_eq!(new_manager.resumed_chunk_count, total_chunk_count); - - // 2. assert the two managers - assert_eq!(manager.chunks, new_manager.chunks); - assert_eq!(manager.completed_files, new_manager.completed_files); - - Ok(()) - } - - #[test] - fn mark_one_chunk_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let total_chunks_count = manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - - // mark a chunk as completed - let removed_chunk = manager - .chunks - .values() - .next() - .expect("Atleast 1 file should be present") - .chunks - .iter() - .next() - .expect("Chunk should be present") - .0; - manager.mark_completed([removed_chunk].into_iter())?; - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. 
we should have 1 completed chunk and (total_chunks_count-1) incomplete chunks - assert_eq!(manager.resumed_chunk_count, 0); - assert_eq!(new_manager.resumed_chunk_count, total_chunks_count - 1); - // also check the structs - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - total_chunks_count - 1 - ); - - // 2. files should not be added to completed files - assert_eq!(new_manager.completed_files.len(), 0); - - Ok(()) - } - - #[test] - fn mark_all_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - manager.mark_completed_all()?; - - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. we should have chunk entries, but 0 chunks inside them - assert_eq!(new_manager.chunks.len(), 5); - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - 0 - ); - // 2. the resumed stats should be 0 - assert_eq!(new_manager.resumed_chunk_count, 0); - - // 3. 
make sure the files are added to completed list - assert_eq!(new_manager.completed_files.len(), 5); - - Ok(()) - } - - #[test] - fn absence_of_metadata_file_should_re_chunk_the_entire_file() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _root_dir, random_files_dir) = init_manager()?; - - let mut random_files = create_random_files(&random_files_dir, 1, 1)?; - let random_file = random_files.remove(0); - manager.chunk_path(&random_files_dir, true, true)?; - - let mut old_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - old_chunks_list.insert(chunk_xorname_from_filename); - } - } - - // remove metadata file from artifacts_dir - let path_xor = PathXorName::new(&random_file); - let metadata_path = manager.artifacts_dir.join(path_xor.0).join(METADATA_FILE); - fs::remove_file(&metadata_path)?; - - // use the same manager to chunk the path - manager.chunk_path(&random_files_dir, true, true)?; - // nothing should be resumed - assert_eq!(manager.resumed_chunk_count, 0); - // but it should be re-chunked - assert_eq!( - manager.get_chunks().len(), - 4, - "we have correct chunk len including data_map" - ); - // metadata file should be created - assert!(metadata_path.exists()); - - let mut new_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - 
new_chunks_list.insert(chunk_xorname_from_filename); - } - } - assert_list_eq(new_chunks_list, old_chunks_list); - - Ok(()) - } - - fn init_manager() -> Result<(TempDir, ChunkManager, PathBuf, PathBuf)> { - let tmp_dir = tempfile::tempdir()?; - let random_files_dir = tmp_dir.path().join("random_files"); - let root_dir = tmp_dir.path().join("root_dir"); - fs::create_dir_all(&random_files_dir)?; - fs::create_dir_all(&root_dir)?; - let manager = ChunkManager::new(&root_dir); - - Ok((tmp_dir, manager, root_dir, random_files_dir)) - } - - fn create_random_files( - at: &Path, - num_files: usize, - mb_per_file: usize, - ) -> Result> { - let files = (0..num_files) - .into_par_iter() - .filter_map(|i| { - let mut path = at.to_path_buf(); - path.push(format!("random_file_{i}")); - match generate_file(&path, mb_per_file) { - Ok(_) => Some(path), - Err(err) => { - error!("Failed to generate random file with {err:?}"); - None - } - } - }) - .collect::>(); - if files.len() < num_files { - return Err(eyre!("Failed to create a Failedkk")); - } - Ok(files) - } - - fn generate_file(path: &PathBuf, file_size_mb: usize) -> Result<()> { - let mut file = File::create(path)?; - let mut rng = thread_rng(); - - // can create [u8; 32] max at time. Thus each mb has 1024*32 such small chunks - let n_small_chunks = file_size_mb * 1024 * 32; - for _ in 0..n_small_chunks { - let random_data: [u8; 32] = rng.gen(); - file.write_all(&random_data)?; - } - let size = file.metadata()?.len() as f64 / (1024 * 1024) as f64; - assert_eq!(file_size_mb as f64, size); - - Ok(()) - } -} diff --git a/sn_cli/src/files/download.rs b/sn_cli/src/files/download.rs deleted file mode 100644 index d95f0a0646..0000000000 --- a/sn_cli/src/files/download.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - get_progress_bar, - upload::{UploadedFile, UPLOADED_FILES}, -}; - -use std::collections::BTreeSet; -use std::ffi::OsString; -use std::path::Path; - -use color_eyre::Result; -use indicatif::ProgressBar; -use walkdir::WalkDir; -use xor_name::XorName; - -use crate::utils::duration_to_minute_seconds_miliseconds_string; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - FilesApi, FilesDownload, FilesDownloadEvent, -}; -use tracing::{debug, error, info}; - -/// The default folder to download files to. -const DOWNLOAD_FOLDER: &str = "safe_files"; - -pub async fn download_files( - files_api: &FilesApi, - root_dir: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) -> Result<()> { - info!("Downloading with batch size of {}", batch_size); - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let download_path = dirs_next::download_dir() - .unwrap_or(root_dir.to_path_buf()) - .join(DOWNLOAD_FOLDER); - std::fs::create_dir_all(download_path.as_path())?; - - let mut uploaded_files = BTreeSet::new(); - - for entry in WalkDir::new(uploaded_files_path.clone()) { - let entry = entry?; - let path = entry.path(); - if path.is_file() { - let hex_xorname = path - .file_name() - .expect("Uploaded file to have name") - .to_str() - .expect("Failed to convert path to string"); - let bytes = hex::decode(hex_xorname)?; - let xor_name_bytes: [u8; 32] = bytes - .try_into() - .expect("Failed to parse XorName from hex string"); - let xor_name = XorName(xor_name_bytes); - let address = ChunkAddress::new(xor_name); - - let 
uploaded_file_metadata = UploadedFile::read(path)?; - let datamap_chunk = uploaded_file_metadata.data_map.map(|bytes| Chunk { - address, - value: bytes, - }); - uploaded_files.insert((xor_name, (uploaded_file_metadata.filename, datamap_chunk))); - } - } - - for (xorname, file_data) in uploaded_files.into_iter() { - download_file( - files_api.clone(), - xorname, - file_data, - &download_path, - show_holders, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(()) -} - -pub async fn download_file( - files_api: FilesApi, - xor_name: XorName, - // original file name and optional datamap chunk - (file_name, datamap): (OsString, Option), - download_path: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) { - let start_time = std::time::Instant::now(); - - let mut files_download = FilesDownload::new(files_api.clone()) - .set_batch_size(batch_size) - .set_show_holders(show_holders) - .set_retry_strategy(retry_strategy); - - println!("Downloading {file_name:?} from {xor_name:64x} with batch-size {batch_size}"); - debug!("Downloading {file_name:?} from {:64x}", xor_name); - let downloaded_file_path = download_path.join(&file_name); - - let mut download_events_rx = files_download.get_events(); - - let progress_handler = tokio::spawn(async move { - let mut progress_bar: Option = None; - - // The loop is guaranteed to end, as the channel will be closed when the download completes or errors out. - while let Some(event) = download_events_rx.recv().await { - match event { - FilesDownloadEvent::Downloaded(_) => { - if let Some(progress_bar) = &progress_bar { - progress_bar.inc(1); - } - } - FilesDownloadEvent::ChunksCount(count) => { - // terminate the progress bar from datamap download. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. 
The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::DatamapCount(count) => { - // terminate the progress bar if it was loaded here. This should not happen. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::Error => { - error!("Got FilesDownloadEvent::Error"); - } - } - } - - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - }); - - let download_result = files_download - .download_file_to_path( - ChunkAddress::new(xor_name), - datamap, - downloaded_file_path.clone(), - ) - .await; - - let duration = start_time.elapsed(); - - // await on the progress handler first as we want to clear the progress bar before printing things. - let _ = progress_handler.await; - match download_result { - Ok(_) => { - debug!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - println!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - let elapsed_time = duration_to_minute_seconds_miliseconds_string(duration); - println!("File downloaded in {elapsed_time}"); - } - Err(error) => { - error!("Error downloading {file_name:?}: {error}"); - println!("Error downloading {file_name:?}: {error}") - } - } -} diff --git a/sn_cli/src/files/estimate.rs b/sn_cli/src/files/estimate.rs deleted file mode 100644 index a5c16f4a03..0000000000 --- a/sn_cli/src/files/estimate.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::ChunkManager; - -use std::path::{Path, PathBuf}; - -use color_eyre::Result; - -use sn_client::{ - protocol::{storage::ChunkAddress, NetworkAddress}, - transfers::NanoTokens, - FilesApi, -}; - -pub struct Estimator { - chunk_manager: ChunkManager, - files_api: FilesApi, -} - -impl Estimator { - pub fn new(chunk_manager: ChunkManager, files_api: FilesApi) -> Self { - Self { - chunk_manager, - files_api, - } - } - - /// Estimate the upload cost of a chosen file - pub async fn estimate_cost( - mut self, - path: PathBuf, - make_data_public: bool, - root_dir: &Path, - ) -> Result<()> { - self.chunk_manager - .chunk_path(&path, false, make_data_public)?; - - let mut estimate: u64 = 0; - - let balance = FilesApi::new(self.files_api.client().clone(), root_dir.to_path_buf()) - .wallet()? 
- .balance() - .as_nano(); - - for (chunk_address, _location) in self.chunk_manager.get_chunks() { - let c = self.files_api.clone(); - - tokio::spawn(async move { - let (_peer, _cost, quote) = c - .wallet() - .expect("estimate_cost: Wallet error.") - .get_store_cost_at_address(NetworkAddress::from_chunk_address( - ChunkAddress::new(chunk_address), - )) - .await - .expect("estimate_cost: Error with file."); - quote.cost.as_nano() - }) - .await - .map(|nanos| estimate += nanos) - .expect("estimate_cost: Concurrency error."); - } - - let total = balance.saturating_sub(estimate); - - println!("**************************************"); - println!("Your current balance: {}", NanoTokens::from(balance)); - println!("Transfer cost estimate: {}", NanoTokens::from(estimate)); - println!( - "Your balance estimate after transfer: {}", - NanoTokens::from(total) - ); - println!("**************************************"); - - Ok(()) - } -} diff --git a/sn_cli/src/files/files_uploader.rs b/sn_cli/src/files/files_uploader.rs deleted file mode 100644 index 6e20f2e788..0000000000 --- a/sn_cli/src/files/files_uploader.rs +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::get_progress_bar; -use crate::utils::duration_to_minute_seconds_string; -use crate::ChunkManager; -use bytes::Bytes; -use color_eyre::{eyre::eyre, Report, Result}; -use futures::StreamExt; -use rand::prelude::SliceRandom; -use rand::thread_rng; -use sn_client::{ - transfers::{TransferError, WalletError}, - Client, Error as ClientError, UploadCfg, UploadEvent, UploadSummary, Uploader, -}; -use sn_protocol::storage::{Chunk, ChunkAddress}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, - time::{Duration, Instant}, -}; -use tokio::{sync::mpsc::Receiver, task::JoinHandle}; -use tracing::{debug, error, info, warn}; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// The result of a successful files upload. -pub struct FilesUploadSummary { - /// The cost and count summary of the upload. - pub upload_summary: UploadSummary, - /// The list of completed files (FilePath, FileName, HeadChunkAddress) - pub completed_files: Vec<(PathBuf, OsString, ChunkAddress)>, - /// The list of incomplete files (FilePath, FileName, HeadChunkAddress) - pub incomplete_files: Vec<(PathBuf, OsString, ChunkAddress)>, -} - -/// A trait designed to customize the standard output behavior for file upload processes. 
-pub trait FilesUploadStatusNotifier: Send { - fn collect_entries(&mut self, entries_iter: Vec); - fn collect_paths(&mut self, path: &Path); - fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize); - fn on_verifying_uploaded_chunks_success( - &self, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ); - fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize); - fn on_failed_to_upload_all_files( - &self, - incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ); - fn on_chunking_complete( - &self, - upload_cfg: &UploadCfg, - make_data_public: bool, - chunks_to_upload_len: usize, - ); - fn on_upload_complete( - &self, - upload_sum: &UploadSummary, - elapsed_time: Duration, - chunks_to_upload_len: usize, - ); -} - -/// Combines the `Uploader` along with the `ChunkManager` -pub struct FilesUploader { - client: Client, - root_dir: PathBuf, - /// entries to upload - entries_to_upload: Vec, - /// The status notifier that can be overridden to perform custom actions instead of printing things to stdout. - status_notifier: Option>, - /// config - make_data_public: bool, - upload_cfg: UploadCfg, -} - -impl FilesUploader { - pub fn new(client: Client, root_dir: PathBuf) -> Self { - let status_notifier = Box::new(StdOutPrinter { - file_paths_to_print: Default::default(), - }); - Self { - client, - root_dir, - entries_to_upload: Default::default(), - status_notifier: Some(status_notifier), - make_data_public: false, - upload_cfg: Default::default(), - } - } - - pub fn set_upload_cfg(mut self, cfg: UploadCfg) -> Self { - self.upload_cfg = cfg; - self - } - - pub fn set_make_data_public(mut self, make_data_public: bool) -> Self { - self.make_data_public = make_data_public; - self - } - - /// Override the default status notifier. By default we print things to stdout. 
- pub fn set_status_notifier( - mut self, - status_notifier: Box, - ) -> Self { - self.status_notifier = Some(status_notifier); - self - } - - pub fn insert_entries(mut self, entries_iter: impl IntoIterator) -> Self { - self.entries_to_upload.extend(entries_iter); - self - } - - pub fn insert_path(mut self, path: &Path) -> Self { - if let Some(notifier) = &mut self.status_notifier { - notifier.collect_paths(path); - } - let entries = WalkDir::new(path).into_iter().flatten(); - self.entries_to_upload.extend(entries); - self - } - - pub async fn start_upload(mut self) -> Result { - let mut chunk_manager = ChunkManager::new(&self.root_dir); - let chunks_to_upload = self.get_chunks_to_upload(&mut chunk_manager).await?; - let chunks_to_upload_len = chunks_to_upload.len(); - - // Notify on chunking complete - if let Some(notifier) = &self.status_notifier { - notifier.on_chunking_complete( - &self.upload_cfg, - self.make_data_public, - chunks_to_upload_len, - ); - } - - let now = Instant::now(); - let mut uploader = Uploader::new(self.client, self.root_dir); - uploader.set_upload_cfg(self.upload_cfg); - uploader.insert_chunk_paths(chunks_to_upload); - - let events_handle = Self::spawn_upload_events_handler( - chunk_manager, - self.make_data_public, - chunks_to_upload_len, - uploader.get_event_receiver(), - self.status_notifier.take(), - )?; - - let upload_sum = match uploader.start_upload().await { - Ok(summary) => summary, - Err(ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - )))) => { - return Err(eyre!( - "Not enough balance in wallet to pay for chunk. 
\ - We have {available:?} but need {required:?} to pay for the chunk" - )) - } - Err(err) => return Err(eyre!("Failed to upload chunk batch: {err}")), - }; - let (chunk_manager, status_notifier) = events_handle.await??; - self.status_notifier = status_notifier; - - // Notify on upload complete - if let Some(notifier) = &self.status_notifier { - notifier.on_upload_complete(&upload_sum, now.elapsed(), chunks_to_upload_len); - } - - let summary = FilesUploadSummary { - upload_summary: upload_sum, - completed_files: chunk_manager.completed_files().clone(), - incomplete_files: chunk_manager - .incomplete_files() - .into_iter() - .map(|(path, file_name, head_address)| { - (path.clone(), file_name.clone(), *head_address) - }) - .collect(), - }; - Ok(summary) - } - - // This will read from the cache if possible. We only re-verify with the network if the file has been cached but - // there are no pending chunks to upload. - async fn get_chunks_to_upload( - &self, - chunk_manager: &mut ChunkManager, - ) -> Result> { - // Initially try reading from the cache - chunk_manager.chunk_with_iter( - self.entries_to_upload.iter().cloned(), - true, - self.make_data_public, - )?; - // We verify if there are no chunks left to upload. 
- let mut chunks_to_upload = if !chunk_manager.is_chunks_empty() { - chunk_manager.get_chunks() - } else { - // re chunk it again to get back all the chunks - let chunks = chunk_manager.already_put_chunks( - self.entries_to_upload.iter().cloned(), - self.make_data_public, - )?; - - // Notify on verification init - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_init(chunks.len()); - } - - let failed_chunks = self.verify_uploaded_chunks(&chunks).await?; - - chunk_manager.mark_completed( - chunks - .into_iter() - .filter(|c| !failed_chunks.contains(c)) - .map(|(xor, _)| xor), - )?; - - if failed_chunks.is_empty() { - // Notify on verification success - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_success( - chunk_manager.completed_files(), - self.make_data_public, - ); - } - - return Ok(vec![]); - } - // Notify on verification failure - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_failure(failed_chunks.len()); - } - failed_chunks - }; - // shuffle the chunks - let mut rng = thread_rng(); - chunks_to_upload.shuffle(&mut rng); - - Ok(chunks_to_upload) - } - - async fn verify_uploaded_chunks( - &self, - chunks_paths: &[(XorName, PathBuf)], - ) -> Result> { - let mut stream = futures::stream::iter(chunks_paths) - .map(|(xorname, path)| async move { - let chunk = Chunk::new(Bytes::from(std::fs::read(path)?)); - let res = self.client.verify_chunk_stored(&chunk).await; - Ok::<_, Report>((xorname, path.clone(), res.is_err())) - }) - .buffer_unordered(self.upload_cfg.batch_size); - let mut failed_chunks = Vec::new(); - - while let Some(result) = stream.next().await { - let (xorname, path, is_error) = result?; - if is_error { - warn!("Failed to fetch a chunk {xorname:?}"); - failed_chunks.push((*xorname, path)); - } - } - - Ok(failed_chunks) - } - - #[expect(clippy::type_complexity)] - fn spawn_upload_events_handler( - mut chunk_manager: ChunkManager, - 
make_data_public: bool, - chunks_to_upload_len: usize, - mut upload_event_rx: Receiver, - status_notifier: Option>, - ) -> Result>)>>> - { - let progress_bar = get_progress_bar(chunks_to_upload_len as u64)?; - let handle = tokio::spawn(async move { - let mut upload_terminated_with_error = false; - // The loop is guaranteed to end, as the channel will be - // closed when the upload completes or errors out. - while let Some(event) = upload_event_rx.recv().await { - match event { - UploadEvent::ChunkUploaded(addr) - | UploadEvent::ChunkAlreadyExistsInNetwork(addr) => { - progress_bar.clone().inc(1); - if let Err(err) = - chunk_manager.mark_completed(std::iter::once(*addr.xorname())) - { - error!("Failed to mark chunk {addr:?} as completed: {err:?}"); - } - } - UploadEvent::Error => { - upload_terminated_with_error = true; - } - UploadEvent::RegisterUploaded { .. } - | UploadEvent::RegisterUpdated { .. } - | UploadEvent::PaymentMade { .. } => {} - } - } - progress_bar.finish_and_clear(); - - // this check is to make sure that we don't partially write to the uploaded_files file if the upload process - // terminates with an error. This race condition can happen as we bail on `upload_result` before we await the - // handler. 
- if upload_terminated_with_error { - error!("Got UploadEvent::Error inside upload event loop"); - } else { - // Notify on upload failure - if let Some(notifier) = &status_notifier { - notifier.on_failed_to_upload_all_files( - chunk_manager.incomplete_files(), - chunk_manager.completed_files(), - make_data_public, - ); - } - } - - Ok::<_, Report>((chunk_manager, status_notifier)) - }); - - Ok(handle) - } -} - -/// The default -struct StdOutPrinter { - file_paths_to_print: Vec, -} - -impl FilesUploadStatusNotifier for StdOutPrinter { - fn collect_entries(&mut self, _entries_iter: Vec) {} - - fn collect_paths(&mut self, path: &Path) { - self.file_paths_to_print.push(path.to_path_buf()); - } - - fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize) { - println!("Files upload attempted previously, verifying {chunks_len} chunks",); - } - - fn on_verifying_uploaded_chunks_success( - &self, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ) { - println!("All files were already uploaded and verified"); - Self::print_uploaded_msg(make_data_public); - - if completed_files.is_empty() { - println!("chunk_manager doesn't have any verified_files, nor any failed_chunks to re-upload."); - } - Self::print_completed_file_list(completed_files); - } - - fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize) { - println!("{failed_chunks_len} chunks were uploaded in the past but failed to verify. 
Will attempt to upload them again..."); - } - - fn on_failed_to_upload_all_files( - &self, - incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ) { - for (_, file_name, _) in incomplete_files { - if let Some(file_name) = file_name.to_str() { - println!("Unverified file \"{file_name}\", suggest to re-upload again."); - info!("Unverified {file_name}"); - } else { - println!("Unverified file \"{file_name:?}\", suggest to re-upload again."); - info!("Unverified file {file_name:?}"); - } - } - - // log uploaded file information - Self::print_uploaded_msg(make_data_public); - Self::print_completed_file_list(completed_files); - } - - fn on_chunking_complete( - &self, - upload_cfg: &UploadCfg, - make_data_public: bool, - chunks_to_upload_len: usize, - ) { - for path in self.file_paths_to_print.iter() { - debug!( - "Uploading file(s) from {path:?} batch size {:?} will verify?: {}", - upload_cfg.batch_size, upload_cfg.verify_store - ); - if make_data_public { - info!("{path:?} will be made public and linkable"); - println!("{path:?} will be made public and linkable"); - } - } - if self.file_paths_to_print.len() == 1 { - println!( - "Splitting and uploading {:?} into {chunks_to_upload_len} chunks", - self.file_paths_to_print[0] - ); - } else { - println!( - "Splitting and uploading {:?} into {chunks_to_upload_len} chunks", - self.file_paths_to_print - ); - } - } - - fn on_upload_complete( - &self, - upload_sum: &UploadSummary, - elapsed_time: Duration, - chunks_to_upload_len: usize, - ) { - let elapsed = duration_to_minute_seconds_string(elapsed_time); - - println!( - "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \ - the leftover {} chunks in {elapsed}", - upload_sum.skipped_count, upload_sum.uploaded_count, - ); - info!( - "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \ - the leftover {} chunks in 
{elapsed}", - upload_sum.skipped_count, upload_sum.uploaded_count, - ); - println!("**************************************"); - println!("* Payment Details *"); - println!("**************************************"); - println!( - "Made payment of {:?} for {} chunks", - upload_sum.storage_cost, upload_sum.uploaded_count - ); - println!( - "Made payment of {:?} for royalties fees", - upload_sum.royalty_fees - ); - println!("New wallet balance: {}", upload_sum.final_balance); - } -} - -impl StdOutPrinter { - fn print_completed_file_list(completed_files: &[(PathBuf, OsString, ChunkAddress)]) { - for (_, file_name, addr) in completed_files { - let hex_addr = addr.to_hex(); - if let Some(file_name) = file_name.to_str() { - println!("Uploaded \"{file_name}\" to address {hex_addr}"); - info!("Uploaded {file_name} to {hex_addr}"); - } else { - println!("Uploaded \"{file_name:?}\" to address {hex_addr}"); - info!("Uploaded {file_name:?} to {hex_addr}"); - } - } - } - - fn print_uploaded_msg(make_data_public: bool) { - println!("**************************************"); - println!("* Uploaded Files *"); - if !make_data_public { - println!("* *"); - println!("* These are not public by default. *"); - println!("* Reupload with `-p` option *"); - println!("* to publish the datamaps. *"); - } - println!("**************************************"); - } -} diff --git a/sn_cli/src/files/upload.rs b/sn_cli/src/files/upload.rs deleted file mode 100644 index 2aa13d7dd8..0000000000 --- a/sn_cli/src/files/upload.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bytes::Bytes; -use color_eyre::Result; -use serde::Deserialize; -use sn_client::protocol::storage::ChunkAddress; -use std::{ffi::OsString, path::Path}; -use tracing::{error, warn}; - -/// Subdir for storing uploaded file into -pub const UPLOADED_FILES: &str = "uploaded_files"; - -/// The metadata related to file that has been uploaded. -/// This is written during upload and read during downloads. -#[derive(Clone, Debug, Deserialize)] -pub struct UploadedFile { - pub filename: OsString, - pub data_map: Option, -} - -impl UploadedFile { - /// Write an UploadedFile to a path identified by the hex of the head ChunkAddress. - /// If you want to update the data_map to None, calling this function will overwrite the previous value. - pub fn write(&self, root_dir: &Path, head_chunk_address: &ChunkAddress) -> Result<()> { - let uploaded_files = root_dir.join(UPLOADED_FILES); - - if !uploaded_files.exists() { - if let Err(error) = std::fs::create_dir_all(&uploaded_files) { - error!("Failed to create {uploaded_files:?} because {error:?}"); - } - } - - let uploaded_file_path = uploaded_files.join(head_chunk_address.to_hex()); - - if self.data_map.is_none() { - warn!( - "No data-map being written for {:?} as it is empty", - self.filename - ); - } - let serialized = - rmp_serde::to_vec(&(&self.filename, &self.data_map)).inspect_err(|_err| { - error!("Failed to serialize UploadedFile"); - })?; - - std::fs::write(&uploaded_file_path, serialized).inspect_err(|_err| { - error!( - "Could not write UploadedFile of {:?} to {uploaded_file_path:?}", - self.filename - ); - })?; - - Ok(()) - } - - pub fn read(path: &Path) -> Result { - let bytes = std::fs::read(path).inspect_err(|_err| { - error!("Error while reading the UploadedFile from {path:?}"); - })?; - let metadata = rmp_serde::from_slice(&bytes).inspect_err(|_err| { - error!("Error while 
deserializing UploadedFile for {path:?}"); - })?; - Ok(metadata) - } -} diff --git a/sn_cli/src/utils.rs b/sn_cli/src/utils.rs deleted file mode 100644 index 093b939960..0000000000 --- a/sn_cli/src/utils.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use std::time::Duration; - -/// Returns whether a hex string is a valid secret key in hex format. -pub fn is_valid_key_hex(hex: &str) -> bool { - hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) -} - -pub fn duration_to_minute_seconds_string(duration: Duration) -> String { - let elapsed_minutes = duration.as_secs() / 60; - let elapsed_seconds = duration.as_secs() % 60; - if elapsed_minutes > 0 { - format!("{elapsed_minutes} minutes {elapsed_seconds} seconds") - } else { - format!("{elapsed_seconds} seconds") - } -} - -pub fn duration_to_minute_seconds_miliseconds_string(duration: Duration) -> String { - let elapsed_minutes = duration.as_secs() / 60; - let elapsed_seconds = duration.as_secs() % 60; - let elapsed_millis = duration.subsec_millis(); - if elapsed_minutes > 0 { - format!("{elapsed_minutes} minutes {elapsed_seconds} seconds {elapsed_millis} milliseconds") - } else if elapsed_seconds > 0 { - format!("{elapsed_seconds} seconds {elapsed_millis} milliseconds") - } else { - format!("{elapsed_millis} milliseconds") - } -} diff --git a/sn_client/CHANGELOG.md b/sn_client/CHANGELOG.md deleted file mode 100644 index fb045ff82c..0000000000 --- a/sn_client/CHANGELOG.md +++ /dev/null @@ -1,2712 +0,0 @@ 
-# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [0.107.7](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.6...sn_client-v0.107.7) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(network)* set metrics server to run on localhost - -## [0.107.6](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.5...sn_client-v0.107.6) - 2024-06-04 - -### Fixed -- *(transfer)* mismatched key shall result in decryption error - -### Other -- *(transfer)* make discord_name decryption backward compatible -## [0.107.5](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.4...sn_client-v0.107.5) - 2024-06-04 - -### Other -- *(network)* set metrics server to run on localhost - -## [0.107.4](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.3...sn_client-v0.107.4) - 2024-06-04 - -### Fixed -- *(faucet)* save the transfer not the cashnote for foundation - -### Other -- *(release)* sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20 - -## [0.107.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.2...sn_client-v0.107.3) - 2024-06-03 - -### Fixed -- enable compile time sk setting for faucet/genesis - -## [0.107.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.1...sn_client-v0.107.2) - 2024-06-03 - -### Other -- bump versions to enable re-release with env 
vars at compilation - -## [0.107.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.3...sn_client-v0.107.0) - 2024-06-03 - -### Added -- *(faucet)* write foundation cash note to disk -- *(client)* read existing mnemonic from disk if avilable -- integrate DAG crawling fixes from Josh and Qi -- *(networking)* add UPnP metrics -- *(network)* [**breaking**] move network versioning away from sn_protocol -- *(keys)* enable compile or runtime override of keys -- *(launchpad)* use nat detection server to determine the nat status - -### Fixed -- *(networking)* upnp feature gates for metrics -- *(networking)* conditional upnp metrics - -### Other -- rename DAG building to crawling -- spend verification error management -- *(networking)* cargo fmt -- use secrets during build process -- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18 - -## [0.106.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.2...sn_client-v0.106.3) - 2024-05-24 - -### Added -- improved spend verification with DAG and fault detection -- upgrade cli audit to use DAG -- remove two uneeded env vars -- pass genesis_cn pub fields separate to hide sk -- hide genesis keypair -- pass sk_str via cli opt -- *(node)* use separate keys of Foundation and Royalty -- *(wallet)* ensure genesis wallet attempts to load from local on init first -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- *(audit)* intercept sender of the payment forward -- *(audit)* collect payment forward statistics -- spend reason enum and sized cipher -- *(metrics)* expose store cost value -- keep track of the estimated network size 
metric -- record lip2p relay and dctur metrics -- *(node)* periodically forward reward to specific address -- use default keys for genesis, or override -- use different key for payment forward -- hide genesis keypair -- tracking beta rewards from the DAG - -### Fixed -- *(uploader)* do not error out immediately on max repayment errors -- *(node)* notify fetch completion earlier to avoid being skipped -- avoid adding mixed type addresses into RT -- enable libp2p metrics to be captured -- correct genesis_pk naming -- genesis_cn public fields generated from hard coded value -- invalid spend reason in data payments - -### Other -- *(uploader)* return summary when upload fails due to max repayments -- *(uploader)* return the list of max repayment reached items -- improve cli DAG collection -- remove now unused mostly duplicated code -- improve DAG verification redundancy -- *(faucet)* devskim ignore -- *(faucet)* log existing faucet balance if non-zero -- *(faucet)* add foundation PK as const -- *(faucet)* clarify logs for verification -- increase initial faucet balance -- add temp log -- *(faucet)* refresh cashnotes on fund -- devSkim ignore foundation pub temp key -- update got 'gifting-from-genesis' faucet feat -- make open metrics feature default but without starting it by default -- Revert "feat(node): make spend and cash_note reason field configurable" -- Revert "feat(cli): track spend creation reasons during audit" -- Revert "chore: refactor CASH_NOTE_REASON strings to consts" -- Revert "feat(client): dump spends creation_reason statistics" -- Revert "chore: address review comments" -- *(node)* tuning the pricing curve -- *(node)* remove un-necessary is_relayed check inside add_potential_candidates -- move historic_quoting_metrics out of the record_store dir -- clippy fixes for open metrics feature -- *(networking)* update tests for pricing curve tweaks -- *(refactor)* stabilise node size to 4k records, -- Revert "chore: rename output reason to purpose for 
clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- resolve errors after reverts -- Revert "feat: spend shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.106.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.1...sn_client-v0.106.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.106.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0...sn_client-v0.106.1) - 2024-05-08 - -### Other -- *(release)* sn_registers-v0.3.13 - -## [0.106.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0-alpha.5...sn_client-v0.106.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(cli)* track spend creation reasons during audit -- *(node)* make spend and cash_note reason field configurable -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- [**breaking**] renamings in CashNote -- *(faucet)* log from sn_client -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(network)* add --upnp flag to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* add in autonat server basics -- *(neetworking)* initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load 
existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs created for -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(transfers)* do not genereate wallet by default -- [**breaking**] rename token to amount in Spend -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- create faucet via account load or generation -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* move acct_packet mnemonic into client layer -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol -- do not add reported external addressese if we are behind home network -- *(networking)* do not add to dialed peers -- *(network)* do not strip out relay's PeerId -- *(relay)* craft the correctly formatted relay address -- *(network)* do not perform AutoNat for clients -- *(relay_manager)* do not dial with P2PCircuit protocol -- *(test)* quoting metrics might have live_time field changed along time -- *(node)* avoid false alert on FailedLocalRecord 
-- *(record_store)* prune only one record at a time -- *(node)* notify replication_fetcher of early completion -- *(node)* fetcher completes on_going_fetch entry on record_key only -- *(node)* not send out replication when failed read from local -- *(networking)* increase the local responsible range of nodes to K_VALUE peers away -- *(network)* clients should not perform farthest relevant record check -- *(node)* replication_fetch keep distance_range sync with record_store -- *(node)* replication_list in range filter -- transfer tests for HotWallet creation -- typo -- *(manager)* do not print to stdout on low verbosity level -- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time - -### Other -- *(versions)* sync versions with latest crates.io vs -- check DAG crawling performance -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- store owner info inside node instead of network -- small cleanup of dead code -- improve naming and typo fix -- clarify client documentation -- clarify client::new description -- clarify client documentation -- clarify client::new description -- *(deps)* bump dependencies -- cargo fmt -- rename output reason to purpose for clarity -- *(network)* move event handling to its own module -- cleanup network events -- *(network)* remove nat detection via incoming connections check -- enable connection keepalive timeout -- remove non relayed listener id from relay manager -- enable multiple relay connections -- return early if peer is not a node -- *(tryout)* do not add new relay candidates -- add debug lines while adding potential relay candidates -- do not remove old non-relayed listeners -- clippy fix -- *(networking)* remove empty file -- *(networking)* re-add global_only -- use quic again -- log listner id -- *(relay)* add candidate even if we are dialing -- remove quic -- cleanup, add in relay server behaviour, and todo -- *(node)* lower some log levels to reduce log size -- *(node)* optimise record_store 
farthest record calculation -- *(node)* do not reset farthest_acceptance_distance -- *(node)* remove duplicated record_store fullness check -- *(networking)* notify network event on failed put due to prune -- *(networking)* ensure pruned data is indeed further away than kept -- *(CI)* confirm there is no failed replication fetch -- *(networking)* remove circular vec error -- *(node)* unit test for recover historic quoting metrics -- *(node)* pass entire QuotingMetrics into calculate_cost_for_records -- *(node)* extend distance range -- addres review comments -- *(transfers)* reduce error size -- *(transfer)* unit tests for PaymentQuote -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* 
sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0 -- *(release)* sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0 -- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0 -- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0 - -## [0.105.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.105.1...sn_client-v0.105.2) - 2024-03-28 - -### Fixed -- *(cli)* read from cache during initial chunking process -- *(uploader)* do not error out on quote expiry during get store cost - -## [0.105.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.105.0...sn_client-v0.105.1) - 2024-03-28 - -### Added -- *(uploader)* error out if the quote has expired during get store_cost -- *(uploader)* use WalletApi to prevent loading client wallet during each operation -- *(transfers)* implement WalletApi to expose common methods - -### Fixed -- *(uploader)* clarify the use of root and wallet dirs - -### Other -- *(uploader)* update docs - -## [0.105.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.31...sn_client-v0.105.0) - 2024-03-27 - -### 
-- *(client)* use the new Uploader instead of FilesUpload
pricing metrics -- lower some networking log levels -- *(node)* loose bad node detection criteria -- *(node)* optimization to reduce logging - -## [0.104.31](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.30...sn_client-v0.104.31) - 2024-03-21 - -### Added -- improve parallelisation with buffered streams -- refactor DAG, improve error management and security -- dag error recording -- *(folders)* folders APIs to accept an encryption key for metadata chunks -- *(protocol)* add rpc to set node log level on the fly - -### Other -- *(cli)* adding automated test for metadata chunk encryption -- *(node)* reduce bad_nodes check resource usage - -## [0.104.30](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29...sn_client-v0.104.30) - 2024-03-18 - -### Other -- updated the following local packages: sn_networking - -## [0.104.29-alpha.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.1...sn_client-v0.104.29-alpha.2) - 2024-03-14 - -### Added -- moved param to outside calc -- refactor spend validation - -### Fixed -- dont stop spend verification at spend error, generalise spend serde - -### Other -- store test utils under a new crate -- *(acc-packet)* adding automated tests to sn_cli::AccountPacket -- improve code quality -- new `sn_service_management` crate -- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82 - -## [0.104.29-alpha.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.0...sn_client-v0.104.29-alpha.1) - 2024-03-08 - -### Other -- *(folders)* adding automated tests to sn_client::FoldersApi - -## [0.104.28](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.27...sn_client-v0.104.28) - 2024-03-06 - -### Added -- *(cli)* pull any Folders changes from network when syncing and merge them to local version -- make sn_cli use sn_clients reeports -- *(folders)* sync up logic and CLI cmd -- *(register)* when a new entry is written return its hash -- refactor upload with iter 
-- *(folders)* regenerate tracking info when downloading Folders from the network
-- mark merkle_reg() accessors as unstable (in comment) on Register types
[0.104.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.11...sn_client-v0.104.12) - 2024-02-14 - -### Other -- updated the following local packages: sn_protocol - -## [0.104.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.10...sn_client-v0.104.11) - 2024-02-14 - -### Other -- *(refactor)* move mod.rs files the modern way - -## [0.104.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.9...sn_client-v0.104.10) - 2024-02-13 - -### Other -- updated the following local packages: sn_protocol - -## [0.104.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.8...sn_client-v0.104.9) - 2024-02-13 - -### Added -- filtering dag errors -- identify orphans and inconsistencies in the DAG - -### Fixed -- manage the genesis spend case - -## [0.104.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.7...sn_client-v0.104.8) - 2024-02-12 - -### Other -- updated the following local packages: sn_networking - -## [0.104.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.6...sn_client-v0.104.7) - 2024-02-12 - -### Other -- updated the following local packages: sn_networking - -## [0.104.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.5...sn_client-v0.104.6) - 2024-02-12 - -### Added -- *(cli)* single payment for all folders being synced -- *(cli)* adding Folders download CLI cmd -- *(client)* adding Folders sync API and CLI cmd - -### Other -- *(cli)* improvements based on peer review - -## [0.104.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.4...sn_client-v0.104.5) - 2024-02-09 - -### Other -- updated the following local packages: sn_networking - -## [0.104.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.3...sn_client-v0.104.4) - 2024-02-09 - -### Other -- updated the following local packages: sn_networking - -## [0.104.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.2...sn_client-v0.104.3) - 
2024-02-08 - -### Other -- copyright update to current year - -## [0.104.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.1...sn_client-v0.104.2) - 2024-02-08 - -### Added -- move the RetryStrategy into protocol and use that during cli upload/download -- *(client)* perform more retries if we are verifying a register -- *(network)* impl RetryStrategy to make the reattempts flexible - -### Fixed -- *(ci)* update the reattempt flag to retry_strategy flag for the cli - -### Other -- *(network)* rename re-attempts to retry strategy - -## [0.104.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.0...sn_client-v0.104.1) - 2024-02-08 - -### Other -- updated the following local packages: sn_networking - -## [0.104.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.7...sn_client-v0.104.0) - 2024-02-07 - -### Added -- *(client)* put register to the peer that we paid to -- *(client)* [**breaking**] make the result of the storage payment into a struct - -### Fixed -- rust docs error - -## [0.103.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.6...sn_client-v0.103.7) - 2024-02-07 - -### Added -- extendable local state DAG in cli - -## [0.103.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.5...sn_client-v0.103.6) - 2024-02-06 - -### Other -- updated the following local packages: sn_transfers - -## [0.103.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.4...sn_client-v0.103.5) - 2024-02-05 - -### Other -- updated the following local packages: sn_networking - -## [0.103.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.3...sn_client-v0.103.4) - 2024-02-05 - -### Other -- updated the following local packages: sn_networking - -## [0.103.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.2...sn_client-v0.103.3) - 2024-02-05 - -### Other -- change to hot wallet -- docs formatting -- cargo fmt changes -- example for api verify 
uploaded chunks -- example for api verify cash note redemptions -- example for api publish on topic -- example for api unsubscribe to topic -- example for api subscribe to topic -- example for api get spend from network -- example for api verify register stored -- example for api get chunk -- example for api store chunk -- example for api create and pay for register -- example for api get register -- example for api get signed reg from network -- example for api signer pk -- example for api signer -- example for api sign -- example for api events channel -- example for api new -- apply format and params to doc templates -- better template set -- mark applicable functions with empty headers - -## [0.103.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.1...sn_client-v0.103.2) - 2024-02-05 - -### Other -- updated the following local packages: sn_protocol - -## [0.103.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.0...sn_client-v0.103.1) - 2024-02-02 - -### Other -- updated the following local packages: sn_networking - -## [0.103.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.22...sn_client-v0.103.0) - 2024-02-02 - -### Other -- [**breaking**] renaming LocalWallet to HotWallet as it holds the secret key for signing tx - -## [0.102.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.21...sn_client-v0.102.22) - 2024-02-01 - -### Other -- updated the following local packages: sn_networking - -## [0.102.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.20...sn_client-v0.102.21) - 2024-02-01 - -### Fixed -- *(client)* error out when fetching large data_map - -## [0.102.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.19...sn_client-v0.102.20) - 2024-02-01 - -### Other -- updated the following local packages: sn_networking - -## [0.102.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.18...sn_client-v0.102.19) - 2024-01-31 - -### 
Other -- nano tokens to network address -- change to question mark from expect -- test doc changes to remove code and refactor for pr -- broadcast signed spends -- send -- verify cash note -- receive and cargo fmt -- send spends - -## [0.102.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.17...sn_client-v0.102.18) - 2024-01-31 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.102.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.16...sn_client-v0.102.17) - 2024-01-30 - -### Other -- *(client)* log client upload failure error - -## [0.102.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.15...sn_client-v0.102.16) - 2024-01-30 - -### Fixed -- *(client)* error out on verify_chunk_store - -## [0.102.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.14...sn_client-v0.102.15) - 2024-01-30 - -### Other -- updated the following local packages: sn_networking - -## [0.102.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.13...sn_client-v0.102.14) - 2024-01-30 - -### Other -- updated the following local packages: sn_protocol - -## [0.102.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.12...sn_client-v0.102.13) - 2024-01-29 - -### Other -- *(sn_transfers)* making some functions/helpers to be constructor methods of public structs - -## [0.102.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.11...sn_client-v0.102.12) - 2024-01-25 - -### Other -- improved pay for storage -- mut wallet description -- revert to mut wallet -- change to wallet result -- cargo fmt -- into wallet doc -- into wallet doc -- expand abbreviations mutable wallet -- pay for storage clone for test pass -- expand on abbreviation and added detail -- pay for records example -- pay for records and cleanup -- pay for storage once detail -- send unsigned detail -- pay for storage -- get store cost at addr unused - -## 
[0.102.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.10...sn_client-v0.102.11) - 2024-01-25 - -### Other -- updated the following local packages: sn_networking - -## [0.102.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.9...sn_client-v0.102.10) - 2024-01-25 - -### Added -- client webtransport-websys feat - -### Other -- use a single target_arch.rs to simplify imports for wasm32 or no - -## [0.102.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.8...sn_client-v0.102.9) - 2024-01-24 - -### Other -- updated the following local packages: sn_networking, sn_networking - -## [0.102.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.7...sn_client-v0.102.8) - 2024-01-24 - -### Added -- client webtransport-websys feat - -### Other -- tidy up wasm32 as target arch rather than a feat - -## [0.102.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.6...sn_client-v0.102.7) - 2024-01-23 - -### Other -- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35 - -## [0.102.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.5...sn_client-v0.102.6) - 2024-01-22 - -### Other -- wallet docs - -## [0.102.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.4...sn_client-v0.102.5) - 2024-01-22 - -### Added -- spend dag utils - -## [0.102.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.3...sn_client-v0.102.4) - 2024-01-18 - -### Other -- updated the following local packages: sn_protocol - -## [0.102.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.2...sn_client-v0.102.3) - 2024-01-18 - -### Added -- set quic as default transport - -## [0.102.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.1...sn_client-v0.102.2) - 2024-01-18 - -### Other -- updated the following local packages: sn_transfers - -## [0.102.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.0...sn_client-v0.102.1) 
- 2024-01-17 - -### Other -- fixed typo -- filled missing arguments -- formatting -- formatting -- new wallet docs - -## [0.102.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.13...sn_client-v0.102.0) - 2024-01-17 - -### Fixed -- *(docs)* update Client signature for doc test -- *(client)* move out the peers added var to event handler loop - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.101.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.12...sn_client-v0.101.13) - 2024-01-17 - -### Other -- new wallet client example - -## [0.101.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.11...sn_client-v0.101.12) - 2024-01-16 - -### Other -- updated the following local packages: sn_transfers - -## [0.101.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.10...sn_client-v0.101.11) - 2024-01-15 - -### Fixed -- *(client)* avoid deadlock during upload in case of error - -## [0.101.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.9...sn_client-v0.101.10) - 2024-01-15 - -### Other -- updated the following local packages: sn_protocol - -## [0.101.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.8...sn_client-v0.101.9) - 2024-01-15 - -### Fixed -- *(client)* cache payments via disk instead of memory map - -### Other -- *(client)* collect wallet handling time statistics - -## [0.101.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.7...sn_client-v0.101.8) - 2024-01-12 - -### Other -- updated the following local packages: sn_networking - -## [0.101.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.6...sn_client-v0.101.7) - 2024-01-12 - -### Fixed -- *(client)* avoid dead lock with less chunks - -## [0.101.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.5...sn_client-v0.101.6) - 2024-01-11 - -### Other -- *(client)* refactor client upload flow - -## 
-- update self_encryption dep
refactor `Files` into `FilesUpload` - -## [0.99.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.41...sn_client-v0.99.42) - 2024-01-08 - -### Other -- updated the following local packages: sn_networking - -## [0.99.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.40...sn_client-v0.99.41) - 2024-01-08 - -### Other -- more doc updates to readme files - -## [0.99.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.39...sn_client-v0.99.40) - 2024-01-08 - -### Fixed -- *(client)* reset sequential_payment_fails on batch upload success - -## [0.99.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.38...sn_client-v0.99.39) - 2024-01-05 - -### Other -- add clippy unwrap lint to workspace - -## [0.99.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.37...sn_client-v0.99.38) - 2024-01-05 - -### Added -- *(network)* move the kad::put_record_to inside PutRecordCfg - -## [0.99.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.36...sn_client-v0.99.37) - 2024-01-03 - -### Added -- *(client)* clients no longer upload data_map by default - -### Other -- refactor for clarity around head_chunk_address -- *(cli)* do not write datamap chunk if non-public - -## [0.99.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.35...sn_client-v0.99.36) - 2024-01-03 - -### Other -- updated the following local packages: sn_networking - -## [0.99.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.34...sn_client-v0.99.35) - 2024-01-02 - -### Fixed -- *(client)* wallet not progress with unconfirmed tx - -## [0.99.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.33...sn_client-v0.99.34) - 2024-01-02 - -### Other -- updated the following local packages: sn_networking - -## [0.99.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.32...sn_client-v0.99.33) - 2023-12-29 - -### Other -- updated the following local packages: 
sn_networking - -## [0.99.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.31...sn_client-v0.99.32) - 2023-12-29 - -### Added -- use put_record_to during upload chunk - -## [0.99.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.30...sn_client-v0.99.31) - 2023-12-26 - -### Other -- updated the following local packages: sn_networking - -## [0.99.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.29...sn_client-v0.99.30) - 2023-12-22 - -### Other -- updated the following local packages: sn_networking - -## [0.99.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.28...sn_client-v0.99.29) - 2023-12-21 - -### Other -- *(client)* emit chunk Uploaded event if a chunk was verified during repayment - -## [0.99.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.27...sn_client-v0.99.28) - 2023-12-20 - -### Other -- reduce default batch size - -## [0.99.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.26...sn_client-v0.99.27) - 2023-12-19 - -### Added -- network royalties through audit POC - -## [0.99.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.25...sn_client-v0.99.26) - 2023-12-19 - -### Other -- updated the following local packages: sn_networking - -## [0.99.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.24...sn_client-v0.99.25) - 2023-12-19 - -### Fixed -- *(test)* tests should try to load just the faucet wallet - -## [0.99.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.23...sn_client-v0.99.24) - 2023-12-19 - -### Other -- updated the following local packages: sn_networking - -## [0.99.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.22...sn_client-v0.99.23) - 2023-12-19 - -### Fixed -- *(cli)* mark chunk completion as soon as we upload each chunk - -## [0.99.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.21...sn_client-v0.99.22) - 2023-12-18 - -### 
Added -- *(transfers)* add api for cleaning up CashNotes - -## [0.99.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.20...sn_client-v0.99.21) - 2023-12-18 - -### Added -- *(client)* update the Files config via setters -- *(client)* track the upload stats inside Files -- *(client)* move upload retry logic from CLI to client - -### Fixed -- *(test)* use the Files struct to upload chunks - -### Other -- *(client)* add docs to the Files struct - -## [0.99.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.19...sn_client-v0.99.20) - 2023-12-14 - -### Other -- updated the following local packages: sn_networking, sn_protocol, sn_registers, sn_transfers - -## [0.99.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.18...sn_client-v0.99.19) - 2023-12-14 - -### Added -- *(client)* add backoff to payment retries -- *(networking)* use backoff for get_record - -## [0.99.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.17...sn_client-v0.99.18) - 2023-12-14 - -### Other -- *(test)* fix log messages during churn test - -## [0.99.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.16...sn_client-v0.99.17) - 2023-12-14 - -### Added -- *(cli)* simple retry mechanism for remaining chunks - -## [0.99.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.15...sn_client-v0.99.16) - 2023-12-13 - -### Other -- updated the following local packages: sn_networking - -## [0.99.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.14...sn_client-v0.99.15) - 2023-12-13 - -### Added -- add amounts to edges -- audit DAG collection and visualization -- cli double spends audit from genesis - -### Fixed -- docs - -### Other -- udeps and gitignore - -## [0.99.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.13...sn_client-v0.99.14) - 2023-12-12 - -### Other -- updated the following local packages: sn_protocol - -## 
[0.99.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.12...sn_client-v0.99.13) - 2023-12-12 - -### Added -- *(cli)* skip payment and upload for existing chunks - -## [0.99.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.11...sn_client-v0.99.12) - 2023-12-12 - -### Added -- constant uploading across batches - -## [0.99.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.10...sn_client-v0.99.11) - 2023-12-11 - -### Other -- updated the following local packages: sn_networking - -## [0.99.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.9...sn_client-v0.99.10) - 2023-12-07 - -### Other -- updated the following local packages: sn_networking - -## [0.99.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.8...sn_client-v0.99.9) - 2023-12-06 - -### Other -- *(network)* use PUT Quorum::One for chunks -- *(network)* add more docs to the get_record_handlers - -## [0.99.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.7...sn_client-v0.99.8) - 2023-12-06 - -### Other -- updated the following local packages: sn_networking - -## [0.99.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.6...sn_client-v0.99.7) - 2023-12-06 - -### Other -- updated the following local packages: sn_transfers - -## [0.99.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.5...sn_client-v0.99.6) - 2023-12-06 - -### Other -- remove some needless cloning -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.99.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.4...sn_client-v0.99.5) - 2023-12-05 - -### Added -- *(network)* use custom enum for get_record errors - -### Other -- *(network)* avoid losing error info by converting them to a single type - -## [0.99.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.3...sn_client-v0.99.4) - 2023-12-05 - -### Other -- updated the 
-- *(network)* use separate PUT/GET configs
Added -- *(chunks)* serialise Chunks with MsgPack instead of bincode - -## [0.98.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.17...sn_client-v0.98.18) - 2023-11-28 - -### Other -- updated the following local packages: sn_protocol - -## [0.98.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.16...sn_client-v0.98.17) - 2023-11-27 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.98.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.15...sn_client-v0.98.16) - 2023-11-23 - -### Added -- *(networking)* reduce batch size to 64 -- add centralised retries for all data payment kinds - -### Fixed -- previous code assumptions - -## [0.98.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.14...sn_client-v0.98.15) - 2023-11-23 - -### Other -- updated the following local packages: sn_networking - -## [0.98.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.13...sn_client-v0.98.14) - 2023-11-23 - -### Other -- updated the following local packages: sn_transfers - -## [0.98.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.12...sn_client-v0.98.13) - 2023-11-23 - -### Other -- updated the following local packages: sn_networking - -## [0.98.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.11...sn_client-v0.98.12) - 2023-11-22 - -### Other -- *(release)* non gossip handler shall not throw gossip msg up - -## [0.98.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.10...sn_client-v0.98.11) - 2023-11-22 - -### Added -- *(cli)* add download batch-size option - -## [0.98.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.9...sn_client-v0.98.10) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - -### Other -- *(sn_networking)* enable_gossip via the builder pattern - -## 
[0.98.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.8...sn_client-v0.98.9) - 2023-11-21 - -### Other -- updated the following local packages: sn_networking - -## [0.98.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.7...sn_client-v0.98.8) - 2023-11-20 - -### Other -- increase default batch size - -## [0.98.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.6...sn_client-v0.98.7) - 2023-11-20 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.98.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.5...sn_client-v0.98.6) - 2023-11-20 - -### Other -- updated the following local packages: sn_networking - -## [0.98.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.4...sn_client-v0.98.5) - 2023-11-20 - -### Added -- quotes - -## [0.98.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.3...sn_client-v0.98.4) - 2023-11-17 - -### Fixed -- *(client)* ensure we store spends at CLOSE_GROUP nodes. 
- -## [0.98.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.2...sn_client-v0.98.3) - 2023-11-16 - -### Other -- updated the following local packages: sn_networking - -## [0.98.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.1...sn_client-v0.98.2) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.98.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.0...sn_client-v0.98.1) - 2023-11-15 - -### Other -- updated the following local packages: sn_protocol - -## [0.98.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.6...sn_client-v0.98.0) - 2023-11-15 - -### Added -- *(client)* [**breaking**] error out if we cannot connect to the network in - -### Other -- *(client)* [**breaking**] remove request_response timeout argument - -## [0.97.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.5...sn_client-v0.97.6) - 2023-11-15 - -### Other -- updated the following local packages: sn_protocol, sn_transfers - -## [0.97.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.4...sn_client-v0.97.5) - 2023-11-14 - -### Other -- *(royalties)* verify royalties fees amounts - -## [0.97.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.3...sn_client-v0.97.4) - 2023-11-14 - -### Other -- updated the following local packages: sn_networking - -## [0.97.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.2...sn_client-v0.97.3) - 2023-11-14 - -### Other -- updated the following local packages: sn_networking - -## [0.97.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.1...sn_client-v0.97.2) - 2023-11-13 - -### Added -- no throwing up if not a gossip listener - -## [0.97.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.0...sn_client-v0.97.1) - 2023-11-10 - -### Other -- updated the following local packages: sn_transfers - -## 
[0.97.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.6...sn_client-v0.97.0) - 2023-11-10 - -### Added -- verify chunks with Quorum::N(2) -- *(client)* only pay one node - -### Fixed -- *(client)* register validations checks for more than one node -- *(client)* set Quorum::One for registers -- *(test)* use client API to listen for gossipsub msgs when checking transfer notifs - -### Other -- *(transfers)* more logs around payments... -- *(churn)* small delay before validating chunks in data_with_churn -- *(client)* register get quorum->one -- *(tests)* make gossipsub verification more strict wrt number of msgs received - -## [0.96.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.5...sn_client-v0.96.6) - 2023-11-09 - -### Other -- updated the following local packages: sn_transfers - -## [0.96.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.4...sn_client-v0.96.5) - 2023-11-09 - -### Other -- updated the following local packages: sn_networking - -## [0.96.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.3...sn_client-v0.96.4) - 2023-11-09 - -### Other -- updated the following local packages: sn_networking - -## [0.96.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.2...sn_client-v0.96.3) - 2023-11-08 - -### Other -- updated the following local packages: sn_networking - -## [0.96.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.1...sn_client-v0.96.2) - 2023-11-08 - -### Added -- *(node)* set custom msg id in order to deduplicate transfer notifs - -## [0.96.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.0...sn_client-v0.96.1) - 2023-11-07 - -### Other -- Derive Clone on ClientRegister - -## [0.96.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.27...sn_client-v0.96.0) - 2023-11-07 - -### Fixed -- *(client)* [**breaking**] make `Files::chunk_file` into an associated function - -## 
[0.95.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.26...sn_client-v0.95.27) - 2023-11-07 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.25...sn_client-v0.95.26) - 2023-11-06 - -### Added -- *(node)* log marker to track the number of peers in the routing table - -## [0.95.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.24...sn_client-v0.95.25) - 2023-11-06 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.23...sn_client-v0.95.24) - 2023-11-06 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.22...sn_client-v0.95.23) - 2023-11-06 - -### Added -- *(deps)* upgrade libp2p to 0.53 - -## [0.95.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.21...sn_client-v0.95.22) - 2023-11-03 - -### Other -- updated the following local packages: sn_networking - -## [0.95.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.20...sn_client-v0.95.21) - 2023-11-02 - -### Other -- updated the following local packages: sn_networking - -## [0.95.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.19...sn_client-v0.95.20) - 2023-11-02 - -### Added -- keep transfers in mem instead of heavy cashnotes - -## [0.95.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.18...sn_client-v0.95.19) - 2023-11-01 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.95.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.17...sn_client-v0.95.18) - 2023-11-01 - -### Other -- log detailed intermediate errors - -## [0.95.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.16...sn_client-v0.95.17) - 2023-11-01 - -### Other -- updated the 
following local packages: sn_networking - -## [0.95.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.15...sn_client-v0.95.16) - 2023-11-01 - -### Other -- updated the following local packages: sn_transfers - -## [0.95.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.14...sn_client-v0.95.15) - 2023-10-31 - -### Other -- updated the following local packages: sn_networking - -## [0.95.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.13...sn_client-v0.95.14) - 2023-10-30 - -### Other -- *(networking)* de/serialise directly to Bytes - -## [0.95.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.12...sn_client-v0.95.13) - 2023-10-30 - -### Added -- `bincode::serialize` into `Bytes` without intermediate allocation - -## [0.95.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.11...sn_client-v0.95.12) - 2023-10-30 - -### Other -- *(node)* use Bytes for Gossip related data types -- *(node)* make gossipsubpublish take Bytes - -## [0.95.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.10...sn_client-v0.95.11) - 2023-10-27 - -### Added -- *(rpc-client)* be able to decrypt received Transfers by providing a secret key - -## [0.95.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.9...sn_client-v0.95.10) - 2023-10-27 - -### Other -- updated the following local packages: sn_networking - -## [0.95.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.8...sn_client-v0.95.9) - 2023-10-26 - -### Fixed -- client carry out merge when verify register storage - -## [0.95.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.7...sn_client-v0.95.8) - 2023-10-26 - -### Fixed -- add libp2p identity with rand dep for tests - -## [0.95.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.6...sn_client-v0.95.7) - 2023-10-26 - -### Other -- updated the following local packages: sn_networking, sn_registers, sn_transfers - 
-## [0.95.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.5...sn_client-v0.95.6) - 2023-10-26 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.95.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.4...sn_client-v0.95.5) - 2023-10-25 - -### Added -- *(cli)* chunk files in parallel - -## [0.95.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.3...sn_client-v0.95.4) - 2023-10-24 - -### Fixed -- *(tests)* nodes rewards tests to account for repayments amounts - -## [0.95.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.2...sn_client-v0.95.3) - 2023-10-24 - -### Other -- *(api)* wallet APIs to account for network royalties fees when returning total cost paid for storage - -## [0.95.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.1...sn_client-v0.95.2) - 2023-10-24 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.95.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.0...sn_client-v0.95.1) - 2023-10-24 - -### Added -- *(client)* do not retry verification GETs - -### Other -- log and debug SwarmCmd - -## [0.95.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.8...sn_client-v0.95.0) - 2023-10-24 - -### Added -- *(protocol)* [**breaking**] implement `PrettyPrintRecordKey` as a `Cow` type - -## [0.94.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.7...sn_client-v0.94.8) - 2023-10-23 - -### Other -- updated the following local packages: sn_networking - -## [0.94.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.6...sn_client-v0.94.7) - 2023-10-23 - -### Other -- more custom debug and debug skips - -## [0.94.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.5...sn_client-v0.94.6) - 2023-10-22 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## 
[0.94.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.4...sn_client-v0.94.5) - 2023-10-21 - -### Other -- updated the following local packages: sn_networking - -## [0.94.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.3...sn_client-v0.94.4) - 2023-10-20 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.94.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.2...sn_client-v0.94.3) - 2023-10-20 - -### Added -- *(client)* stop further bootstrapping if the client has K_VALUE peers - -## [0.94.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.1...sn_client-v0.94.2) - 2023-10-19 - -### Fixed -- *(network)* emit NetworkEvent when we publish a gossipsub msg - -## [0.94.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.0...sn_client-v0.94.1) - 2023-10-18 - -### Other -- updated the following local packages: sn_networking - -## [0.94.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.18...sn_client-v0.94.0) - 2023-10-18 - -### Added -- *(client)* verify register sync, and repay if not stored on all nodes -- *(client)* verify register uploads and retry and repay if failed - -### Other -- Revert "feat: keep transfers in mem instead of mem and i/o heavy cashnotes" -- *(client)* always validate storage payments -- repay for data in node rewards tests -- *(client)* remove price tolerance at the client - -## [0.93.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.17...sn_client-v0.93.18) - 2023-10-18 - -### Added -- keep transfers in mem instead of mem and i/o heavy cashnotes - -## [0.93.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.16...sn_client-v0.93.17) - 2023-10-17 - -### Fixed -- *(transfers)* dont overwrite existing payment transactions when we top up - -### Other -- adding comments and cleanup around quorum / payment fixes - -## 
[0.93.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.15...sn_client-v0.93.16) - 2023-10-16 - -### Fixed -- return correct error type -- consider record split an error, handle it for regs - -## [0.93.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.14...sn_client-v0.93.15) - 2023-10-16 - -### Other -- updated the following local packages: sn_networking - -## [0.93.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.13...sn_client-v0.93.14) - 2023-10-13 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.93.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.12...sn_client-v0.93.13) - 2023-10-13 - -### Fixed -- batch download process - -## [0.93.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.11...sn_client-v0.93.12) - 2023-10-12 - -### Other -- updated the following local packages: sn_networking - -## [0.93.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.10...sn_client-v0.93.11) - 2023-10-12 - -### Other -- updated the following local packages: sn_networking - -## [0.93.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.9...sn_client-v0.93.10) - 2023-10-12 - -### Other -- more detailed logging when client creating store cash_note - -## [0.93.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.8...sn_client-v0.93.9) - 2023-10-11 - -### Fixed -- expose RecordMismatch errors and cleanup wallet if we hit that - -### Other -- *(transfers)* add some more clarity around DoubleSpendAttemptedForCashNotes -- *(transfers)* remove pointless api - -## [0.93.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.7...sn_client-v0.93.8) - 2023-10-11 - -### Other -- updated the following local packages: sn_networking - -## [0.93.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.6...sn_client-v0.93.7) - 2023-10-11 - -### Added -- showing expected holders to CLI when 
required -- verify put_record with expected_holders - -## [0.93.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.5...sn_client-v0.93.6) - 2023-10-10 - -### Added -- *(transfer)* special event for transfer notifs over gossipsub - -## [0.93.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.4...sn_client-v0.93.5) - 2023-10-10 - -### Other -- compare files after download twice - -## [0.93.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.3...sn_client-v0.93.4) - 2023-10-10 - -### Other -- updated the following local packages: sn_transfers - -## [0.93.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.2...sn_client-v0.93.3) - 2023-10-09 - -### Other -- updated the following local packages: sn_networking - -## [0.93.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.1...sn_client-v0.93.2) - 2023-10-08 - -### Other -- updated the following local packages: sn_networking - -## [0.93.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.0...sn_client-v0.93.1) - 2023-10-06 - -### Added -- feat!(sn_transfers): unify store api for wallet - -### Other -- *(client)* dont println for wallet errors - -## [0.93.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.9...sn_client-v0.93.0) - 2023-10-06 - -### Fixed -- *(client)* [**breaking**] unify send_without_verify and send functions - -### Other -- *(cli)* reuse the client::send function to send amount from wallet - -## [0.92.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.8...sn_client-v0.92.9) - 2023-10-06 - -### Other -- fix new clippy errors - -## [0.92.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.7...sn_client-v0.92.8) - 2023-10-05 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.92.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.6...sn_client-v0.92.7) - 2023-10-05 - -### Added -- feat!(cli): remove 
concurrency argument - -## [0.92.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.5...sn_client-v0.92.6) - 2023-10-05 - -### Fixed -- *(sn_transfers)* be sure we store CashNotes before writing the wallet file - -## [0.92.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.4...sn_client-v0.92.5) - 2023-10-05 - -### Added -- quorum for records get - -### Fixed -- use specific verify func for chunk stored verification - -## [0.92.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.3...sn_client-v0.92.4) - 2023-10-05 - -### Added -- use progress bars on `files upload` - -### Other -- pay_for_chunks returns cost and new balance - -## [0.92.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.2...sn_client-v0.92.3) - 2023-10-04 - -### Fixed -- *(wallet)* remove expect statements - -## [0.92.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.1...sn_client-v0.92.2) - 2023-10-04 - -### Fixed -- record_to_verify for store_chunk shall be a Chunk - -## [0.92.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.0...sn_client-v0.92.1) - 2023-10-04 - -### Other -- updated the following local packages: sn_networking - -## [0.92.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.11...sn_client-v0.92.0) - 2023-10-04 - -### Added -- improve register API - -## [0.91.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.10...sn_client-v0.91.11) - 2023-10-04 - -### Added -- *(client)* reuse cashnotes for address payments - -### Other -- separate method and write test - -## [0.91.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.9...sn_client-v0.91.10) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.8...sn_client-v0.91.9) - 2023-10-03 - -### Added -- re-attempt when get chunk from network - -## 
[0.91.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.7...sn_client-v0.91.8) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.6...sn_client-v0.91.7) - 2023-10-02 - -### Other -- remove all spans. - -## [0.91.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.5...sn_client-v0.91.6) - 2023-10-02 - -### Other -- updated the following local packages: sn_transfers - -## [0.91.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.4...sn_client-v0.91.5) - 2023-10-02 - -### Other -- *(client)* more logs around StoreCost retrieval - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.3...sn_client-v0.91.4) - 2023-09-29 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.91.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.2...sn_client-v0.91.3) - 2023-09-28 - -### Added -- client to client transfers - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.1...sn_client-v0.91.2) - 2023-09-27 - -### Added -- *(networking)* remove optional_semaphore being passed down from apps -- all records are Quorum::All once more - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.0...sn_client-v0.91.1) - 2023-09-27 - -### Added -- *(client)* fail fast when a chunk is missing - -## [0.91.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.6...sn_client-v0.91.0) - 2023-09-27 - -### Added -- deep clean sn_transfers, reduce exposition, remove dead code - -## [0.90.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.5...sn_client-v0.90.6) - 2023-09-26 - -### Other -- updated the following local packages: sn_networking - -## [0.90.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.4...sn_client-v0.90.5) - 2023-09-26 - -### Added -- *(apis)* adding client 
and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics - -## [0.90.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.3...sn_client-v0.90.4) - 2023-09-25 - -### Other -- updated the following local packages: sn_transfers - -## [0.90.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.2...sn_client-v0.90.3) - 2023-09-25 - -### Other -- cleanup renamings in sn_transfers - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.1...sn_client-v0.90.2) - 2023-09-25 - -### Other -- *(client)* serialize ClientEvent - -## [0.90.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.0...sn_client-v0.90.1) - 2023-09-22 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC services to pub/sub to gossipsub topics - -## [0.90.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.23...sn_client-v0.90.0) - 2023-09-21 - -### Added -- dusking DBCs - -### Other -- rename Nano NanoTokens -- improve naming - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.22...sn_client-v0.89.23) - 2023-09-21 - -### Other -- updated the following local packages: sn_networking - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.21...sn_client-v0.89.22) - 2023-09-21 - -### Other -- clarify `files download` usage -- output address of uploaded file - -## [0.89.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.20...sn_client-v0.89.21) - 2023-09-20 - -### Other -- updated the following local packages: sn_networking - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.19...sn_client-v0.89.20) - 2023-09-20 - -### Other -- major dep updates - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.18...sn_client-v0.89.19) - 2023-09-20 - -### Other -- allow chunks to be Quorum::One - -## 
[0.89.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.17...sn_client-v0.89.18) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.16...sn_client-v0.89.17) - 2023-09-19 - -### Other -- error handling when failed fetch store cost - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.15...sn_client-v0.89.16) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.14...sn_client-v0.89.15) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.13...sn_client-v0.89.14) - 2023-09-18 - -### Other -- updated the following local packages: sn_networking - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.12...sn_client-v0.89.13) - 2023-09-18 - -### Added -- *(client)* download file concurrently - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.11...sn_client-v0.89.12) - 2023-09-18 - -### Added -- serialisation for transfers for out of band sending - -### Other -- *(client)* simplify API -- *(cli)* use iter::chunks() API to batch and pay for our chunks - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.10...sn_client-v0.89.11) - 2023-09-15 - -### Added -- *(client)* pay for chunks in batches - -### Other -- *(client)* refactor chunk upload code to allow greater concurrency - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.9...sn_client-v0.89.10) - 2023-09-15 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.8...sn_client-v0.89.9) - 2023-09-15 - -### Other -- *(client)* remove unused 
wallet_client - -## [0.89.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.7...sn_client-v0.89.8) - 2023-09-14 - -### Added -- *(register)* client to pay for Register only if local wallet has not paid for it yet - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.6...sn_client-v0.89.7) - 2023-09-14 - -### Added -- split upload procedure into batches - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.5...sn_client-v0.89.6) - 2023-09-14 - -### Added -- *(network)* enable custom node metrics -- *(network)* use NetworkConfig for network construction - -### Other -- remove unused error variants -- *(network)* use builder pattern to construct the Network -- *(metrics)* rename feature flag and small fixes - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.4...sn_client-v0.89.5) - 2023-09-13 - -### Added -- *(register)* paying nodes for Register storage - -### Other -- *(register)* adding Register payment storage tests to run in CI -- *(payments)* adapting code to recent changes in Transfers - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.3...sn_client-v0.89.4) - 2023-09-12 - -### Added -- utilize stream decryptor - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.2...sn_client-v0.89.3) - 2023-09-12 - -### Other -- updated the following local packages: sn_networking - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.1...sn_client-v0.89.2) - 2023-09-12 - -### Other -- *(metrics)* rename network metrics and remove from default features list - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.0...sn_client-v0.89.1) - 2023-09-12 - -### Added -- add tx and parent spends verification -- chunk payments using UTXOs instead of DBCs - -### Other -- use updated sn_dbc - -## 
[0.89.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.16...sn_client-v0.89.0) - 2023-09-11 - -### Added -- [**breaking**] Clients add a tolerance to store cost - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.15...sn_client-v0.88.16) - 2023-09-11 - -### Other -- utilize stream encryptor - -## [0.88.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.14...sn_client-v0.88.15) - 2023-09-08 - -### Added -- *(client)* repay for chunks if they cannot be validated - -### Other -- *(client)* refactor to have permits at network layer -- *(refactor)* remove wallet_client args from upload flow -- *(refactor)* remove upload_chunks semaphore arg - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.13...sn_client-v0.88.14) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.12...sn_client-v0.88.13) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.11...sn_client-v0.88.12) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.10...sn_client-v0.88.11) - 2023-09-05 - -### Added -- encryption output to disk - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.9...sn_client-v0.88.10) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking - -## [0.88.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.8...sn_client-v0.88.9) - 2023-09-04 - -### Added -- feat!(protocol): make payments for all record types - -### Fixed -- fix permissions for public register creation - -### Other -- *(release)* sn_registers-v0.2.4 -- utilize encrypt_from_file - -## 
[0.88.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.7...sn_client-v0.88.8) - 2023-09-04 - -### Other -- Add client and protocol detail - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.6...sn_client-v0.88.7) - 2023-09-01 - -### Other -- *(transfers)* store dbcs by ref to avoid more clones -- *(client)* make unconfirmed txs btreeset, remove unnecessary cloning -- *(client)* remove one signed_spend clone - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.5...sn_client-v0.88.6) - 2023-09-01 - -### Other -- updated the following local packages: sn_networking - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.4...sn_client-v0.88.5) - 2023-08-31 - -### Other -- remove unused async - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.3...sn_client-v0.88.4) - 2023-08-31 - -### Other -- updated the following local packages: sn_protocol, sn_transfers - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.2...sn_client-v0.88.3) - 2023-08-31 - -### Other -- some logging updates - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.1...sn_client-v0.88.2) - 2023-08-31 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.0...sn_client-v0.88.1) - 2023-08-31 - -### Added -- *(cli)* expose 'concurrency' flag -- *(cli)* increase put parallelisation - -### Other -- *(client)* reduce default concurrency -- *(client)* improve download concurrency. 
- -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.29...sn_client-v0.88.0) - 2023-08-30 - -### Added -- refactor to allow greater upload parallelisation -- one transfer per data set, mapped dbcs to content addrs -- [**breaking**] pay each chunk holder direct -- feat!(protocol): gets keys with GetStoreCost -- feat!(protocol): get price and pay for each chunk individually -- feat!(protocol): remove chunk merkletree to simplify payment - -### Fixed -- *(tokio)* remove tokio fs - -### Other -- *(node)* refactor churn test order -- *(deps)* bump tokio to 1.32.0 -- *(client)* refactor client wallet to reduce dbc clones -- *(client)* pass around content payments map mut ref -- *(client)* reduce transferoutputs cloning -- *(client)* error out early for invalid transfers -- *(node)* reenable payment fail check - -## [0.87.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.28...sn_client-v0.87.29) - 2023-08-30 - -### Other -- updated the following local packages: sn_networking - -## [0.87.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.27...sn_client-v0.87.28) - 2023-08-29 - -### Other -- updated the following local packages: sn_networking - -## [0.87.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.26...sn_client-v0.87.27) - 2023-08-24 - -### Other -- updated the following local packages: sn_registers, sn_transfers - -## [0.87.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.25...sn_client-v0.87.26) - 2023-08-22 - -### Other -- updated the following local packages: sn_networking - -## [0.87.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.24...sn_client-v0.87.25) - 2023-08-22 - -### Fixed -- fixes to allow upload file works properly - -## [0.87.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.23...sn_client-v0.87.24) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## 
[0.87.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.22...sn_client-v0.87.23) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## [0.87.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.21...sn_client-v0.87.22) - 2023-08-18 - -### Added -- remove client and node initial join flow - -## [0.87.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.20...sn_client-v0.87.21) - 2023-08-18 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.19...sn_client-v0.87.20) - 2023-08-17 - -### Fixed -- *(client)* start bootstrap when we are connected to one peer - -## [0.87.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.18...sn_client-v0.87.19) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.17...sn_client-v0.87.18) - 2023-08-17 - -### Fixed -- *(client)* use boostrap and fire Connecting event - -## [0.87.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.16...sn_client-v0.87.17) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.15...sn_client-v0.87.16) - 2023-08-16 - -### Added -- *(client)* do not use cached proofs - -## [0.87.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.14...sn_client-v0.87.15) - 2023-08-16 - -### Added -- overpay by default to allow margin - -## [0.87.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.13...sn_client-v0.87.14) - 2023-08-15 - -### Other -- updated the following local packages: sn_networking - -## [0.87.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.12...sn_client-v0.87.13) - 2023-08-11 - -### Added -- *(transfers)* add resend loop for 
unconfirmed txs -- *(networking)* ensure we always use the highest price we find -- *(networking)* enable returning less than majority for store_cost -- *(client)* use store cost queries to pre populate cost and RT - -### Fixed -- *(client)* only_store_cost_if_higher missing else added - -### Other -- remove client inactivity random storage query -- *(node)* resend unconfirmed txs before asserting -- *(cli)* print cost info -- *(networking)* remove logs, fix typos and clippy issues -- overpay in advance to avoid storage cost calculation inconsistent - -## [0.87.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.11...sn_client-v0.87.12) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.87.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.10...sn_client-v0.87.11) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking - -## [0.87.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.9...sn_client-v0.87.10) - 2023-08-08 - -### Added -- *(transfers)* add get largest dbc for spending - -### Fixed -- *(node)* prevent panic in storage calcs - -### Other -- tidy store cost code - -## [0.87.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.8...sn_client-v0.87.9) - 2023-08-07 - -### Other -- updated the following local packages: sn_networking - -## [0.87.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.7...sn_client-v0.87.8) - 2023-08-07 - -### Added -- rework register addresses to include pk - -### Other -- rename network addresses confusing name method to xorname - -## [0.87.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.6...sn_client-v0.87.7) - 2023-08-04 - -### Other -- updated the following local packages: sn_networking - -## [0.87.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.5...sn_client-v0.87.6) - 2023-08-03 - -### Other -- updated the following local 
packages: sn_networking - -## [0.87.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.4...sn_client-v0.87.5) - 2023-08-03 - -### Other -- updated the following local packages: sn_networking - -## [0.87.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.3...sn_client-v0.87.4) - 2023-08-02 - -### Fixed -- do not create genesis when facuet already funded - -## [0.87.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.2...sn_client-v0.87.3) - 2023-08-01 - -### Other -- *(client)* reattempt to get_spend_from_network -- add more verificaiton for payments - -## [0.87.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.1...sn_client-v0.87.2) - 2023-08-01 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.0...sn_client-v0.87.1) - 2023-08-01 - -### Added -- *(cli)* add no-verify flag to cli - -### Other -- fix double spend and remove arbitrary wait -- *(node)* verify faucet transactions before continuing -- *(netowrking)* change default re-attempt behaviour - -## [0.87.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.11...sn_client-v0.87.0) - 2023-08-01 - -### Other -- *(register)* [**breaking**] hashing the node of a Register to sign it instead of bincode-serialising it - -## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.10...sn_client-v0.86.11) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.9...sn_client-v0.86.10) - 2023-07-31 - -### Added -- carry out get_record re-attempts for critical record -- for put_record verification, NotEnoughCopies is acceptable - -### Fixed -- *(test)* using proper wallets during data_with_churn test - -### Other -- move PrettyPrintRecordKey to sn_protocol -- small refactors for failing CI -- more tracable logs regarding chunk 
payment prove - -## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.8...sn_client-v0.86.9) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.7...sn_client-v0.86.8) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking - -## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.6...sn_client-v0.86.7) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.5...sn_client-v0.86.6) - 2023-07-28 - -### Other -- adapt all logging to use pretty record key - -## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.4...sn_client-v0.86.5) - 2023-07-27 - -### Other -- updated the following local packages: sn_networking - -## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.3...sn_client-v0.86.4) - 2023-07-26 - -### Fixed -- *(register)* Registers with same name but different tags were not being stored by the network - -### Other -- centralising RecordKey creation logic to make sure we always use the same for all content type - -## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.2...sn_client-v0.86.3) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.1...sn_client-v0.86.2) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.0...sn_client-v0.86.1) - 2023-07-25 - -### Added -- *(replication)* replicate when our close group changes - -### Fixed -- *(client)* keep an active `ClientEvent` receiver - -### Other -- *(client)* get k_value from const fn - -## 
[0.86.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.55...sn_client-v0.86.0) - 2023-07-21 - -### Added -- *(protocol)* [**breaking**] make Chunks storage payment required - -### Other -- tokens transfers task in data_with_churn tests to use client apis instead of faucet helpers - -## [0.85.55](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.54...sn_client-v0.85.55) - 2023-07-20 - -### Other -- cleanup error types - -## [0.85.54](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.53...sn_client-v0.85.54) - 2023-07-19 - -### Added -- using kad::record for dbc spend ops -- *(CI)* dbc verfication during network churning test - -## [0.85.53](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.52...sn_client-v0.85.53) - 2023-07-19 - -### Other -- updated the following local packages: sn_protocol - -## [0.85.52](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.51...sn_client-v0.85.52) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.51](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.50...sn_client-v0.85.51) - 2023-07-18 - -### Added -- safer registers requiring signatures -- *(networking)* remove LostRecordEvent - -### Fixed -- address PR comments -- client - -## [0.85.50](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.49...sn_client-v0.85.50) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.49](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.48...sn_client-v0.85.49) - 2023-07-17 - -### Other -- updated the following local packages: sn_networking - -## [0.85.48](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.47...sn_client-v0.85.48) - 2023-07-17 - -### Added -- *(networking)* upgrade to libp2p 0.52.0 - -### Other -- *(networking)* log all connected peer count - -## 
[0.85.47](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.46...sn_client-v0.85.47) - 2023-07-17 - -### Added -- *(client)* keep storage payment proofs in local wallet - -## [0.85.46](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.45...sn_client-v0.85.46) - 2023-07-12 - -### Other -- client to upload paid chunks in batches - -## [0.85.45](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.44...sn_client-v0.85.45) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.44](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.43...sn_client-v0.85.44) - 2023-07-11 - -### Fixed -- *(client)* publish register on creation - -## [0.85.43](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.42...sn_client-v0.85.43) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.41...sn_client-v0.85.42) - 2023-07-10 - -### Other -- updated the following local packages: sn_networking - -## [0.85.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.40...sn_client-v0.85.41) - 2023-07-10 - -### Added -- client query register via get_record -- client upload Register via put_record - -## [0.85.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.39...sn_client-v0.85.40) - 2023-07-06 - -### Other -- updated the following local packages: sn_networking - -## [0.85.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.38...sn_client-v0.85.39) - 2023-07-06 - -### Added -- PutRecord response during client upload -- client upload chunk using kad::put_record - -### Other -- *(release)* sn_cli-v0.79.0/sn_logging-v0.2.0/sn_node-v0.86.0/sn_testnet-v0.1.76/sn_networking-v0.3.11 - -## [0.85.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.37...sn_client-v0.85.38) - 2023-07-05 - -### Added -- carry out validation for 
record_store::put - -## [0.85.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.36...sn_client-v0.85.37) - 2023-07-04 - -### Other -- demystify permissions - -## [0.85.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.35...sn_client-v0.85.36) - 2023-07-03 - -### Added -- append SAFE_PEERS to initial_peers after restart - -### Fixed -- *(text)* data_churn_test creates clients parsing SAFE_PEERS env - -### Other -- reduce SAMPLE_SIZE for the data_with_churn test -- some client log tidy up - -## [0.85.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.34...sn_client-v0.85.35) - 2023-06-29 - -### Other -- updated the following local packages: sn_networking - -## [0.85.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.33...sn_client-v0.85.34) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.32...sn_client-v0.85.33) - 2023-06-28 - -### Added -- make the example work, fix sync when reg doesnt exist -- rework permissions, implement register cmd handlers -- register refactor, kad reg without cmds - -### Fixed -- rename UserRights to UserPermissions - -## [0.85.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.31...sn_client-v0.85.32) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.30...sn_client-v0.85.31) - 2023-06-28 - -### Added -- *(node)* dial without PeerId - -## [0.85.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.29...sn_client-v0.85.30) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## [0.85.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.28...sn_client-v0.85.29) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## 
[0.85.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.27...sn_client-v0.85.28) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.26...sn_client-v0.85.27) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.25...sn_client-v0.85.26) - 2023-06-26 - -### Other -- *(release)* sn_cli-v0.78.9/sn_logging-v0.1.4/sn_node-v0.83.55/sn_testnet-v0.1.59/sn_networking-v0.1.24 - -## [0.85.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.24...sn_client-v0.85.25) - 2023-06-26 - -### Other -- payment proof map to use xorname as index instead of merkletree nodes type - -## [0.85.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.23...sn_client-v0.85.24) - 2023-06-24 - -### Other -- updated the following local packages: sn_networking - -## [0.85.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.22...sn_client-v0.85.23) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.21...sn_client-v0.85.22) - 2023-06-23 - -### Added -- forward chunk when not being the closest -- repliate to peers lost record - -### Fixed -- client upload to peers closer to chunk - -## [0.85.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.20...sn_client-v0.85.21) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.19...sn_client-v0.85.20) - 2023-06-22 - -### Other -- *(client)* initial refactor around uploads - -## [0.85.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.18...sn_client-v0.85.19) - 2023-06-22 - -### Fixed -- improve client upload speed - -## 
[0.85.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.17...sn_client-v0.85.18) - 2023-06-21 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.16...sn_client-v0.85.17) - 2023-06-21 - -### Other -- *(network)* remove `NetworkEvent::PutRecord` dead code - -## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.15...sn_client-v0.85.16) - 2023-06-21 - -### Other -- remove unused error variants -- *(node)* obtain parent_tx from SignedSpend -- *(release)* sn_cli-v0.77.46/sn_logging-v0.1.3/sn_node-v0.83.42/sn_testnet-v0.1.46/sn_networking-v0.1.15 - -## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.14...sn_client-v0.85.15) - 2023-06-20 - -### Added -- *(network)* validate `Record` on GET -- *(network)* validate and store `ReplicatedData` -- *(node)* perform proper validations on PUT -- *(network)* validate and store `Record` - -### Fixed -- *(node)* store parent tx along with `SignedSpend` - -### Other -- *(docs)* add more docs and comments - -## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.13...sn_client-v0.85.14) - 2023-06-20 - -### Other -- updated the following local packages: sn_networking - -## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.12...sn_client-v0.85.13) - 2023-06-20 - -### Added -- pay 1 nano per Chunk as temporary approach till net-invoices are implemented -- committing storage payment SignedSpends to the network -- nodes to verify input DBCs of Chunk payment proof were spent - -### Other -- specific error types for different payment proof verification scenarios -- include the Tx instead of output DBCs as part of storage payment proofs - -## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.11...sn_client-v0.85.12) - 2023-06-20 - -### Other -- updated the following local packages: 
sn_networking - -## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.10...sn_client-v0.85.11) - 2023-06-16 - -### Fixed -- reduce client mem usage during uploading - -## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.9...sn_client-v0.85.10) - 2023-06-15 - -### Added -- add double spend test - -### Fixed -- parent spend issue - -## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.8...sn_client-v0.85.9) - 2023-06-14 - -### Added -- include output DBC within payment proof for Chunks storage - -## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.7...sn_client-v0.85.8) - 2023-06-14 - -### Other -- updated the following local packages: sn_networking - -## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.6...sn_client-v0.85.7) - 2023-06-14 - -### Added -- *(client)* expose req/resp timeout to client cli - -## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.5...sn_client-v0.85.6) - 2023-06-13 - -### Other -- *(release)* sn_cli-v0.77.12/sn_logging-v0.1.2/sn_node-v0.83.10/sn_testnet-v0.1.14/sn_networking-v0.1.6 - -## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.4...sn_client-v0.85.5) - 2023-06-12 - -### Added -- remove spendbook rw locks, improve logging - -### Other -- remove uneeded printlns -- *(release)* sn_cli-v0.77.10/sn_record_store-v0.1.3/sn_node-v0.83.8/sn_testnet-v0.1.12/sn_networking-v0.1.4 - -## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.3...sn_client-v0.85.4) - 2023-06-09 - -### Other -- manually change crate version - -## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.2...sn_client-v0.85.3) - 2023-06-09 - -### Other -- more replication flow statistics during mem_check test - -## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.1...sn_client-v0.85.2) - 2023-06-07 - -### Added -- bail out if empty 
list of addreses is provided for payment proof generation -- *(client)* add progress indicator for initial network connections -- attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate payment the payment proof tree -- Chunk storage payment and building payment proofs - -### Fixed -- remove progress bar after it's finished. - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- small log wording updates -- exposing definition of merkletree nodes data type and additional doc in code -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate - -## [0.85.1](https://github.com/jacderida/safe_network/compare/sn_client-v0.85.0...sn_client-v0.85.1) - 2023-06-06 - -### Added -- refactor replication flow to using pull model diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml deleted file mode 100644 index a1d49b0508..0000000000 --- a/sn_client/Cargo.toml +++ /dev/null @@ -1,91 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "Safe Network Client" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_client" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.110.4" - -[features] -default = [] -local-discovery = ["sn_networking/local-discovery"] 
-open-metrics = ["sn_networking/open-metrics", "prometheus-client"] -test-utils = ["sn_peers_acquisition", "eyre"] -# required to pass on flag to node builds -websockets = ["sn_networking/websockets", "sn_protocol/websockets"] - - -[dependencies] -tokio = { version = "1.35.0", features = [ - "io-util", - "macros", - "rt", - "sync", - "time", -] } -bip39 = "2.0.0" -curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ - "num-bigint", -] } -eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } -async-trait = "0.1" -backoff = { version = "0.4.0", features = ["tokio"] } -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -crdts = "7.3.2" -custom_debug = "~0.6.1" -dashmap = "~6.1.0" -futures = "~0.3.13" -hex = "~0.4.3" -itertools = "~0.12.1" -libp2p = { version = "0.53", features = ["identify"] } -petgraph = { version = "0.6.4", features = ["serde-1"] } -prometheus-client = { version = "0.22", optional = true } -rand = { version = "~0.8.5", features = ["small_rng"] } -rayon = "1.8.0" -rmp-serde = "1.1.1" -self_encryption = "~0.30.0" -serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -tempfile = "3.6.0" -thiserror = "1.0.23" -tiny-keccak = "~2.0.2" -tracing = { version = "~0.1.26" } -xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3", optional = true } -eyre = { version = "0.6.8", optional = true } - -[dev-dependencies] -assert_matches = "1.5.0" -dirs-next = "~2.0.0" -# add rand to libp2p -libp2p-identity = { version = "0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.36" } 
-sn_registers = { path = "../sn_registers", version = "0.3.21", features = [ - "test-utils", -] } - -[lints] -workspace = true - -# to allow wasm compilation -[lib] -crate-type = ["cdylib", "rlib"] - -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.12", features = ["js"] } -wasm-bindgen = "0.2.90" -wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -console_error_panic_hook = "0.1.6" -tracing-wasm = "0.2.1" -wasmtimer = "0.2.0" -web-sys = { version = "0.3.22", features = ["console"] } diff --git a/sn_client/README.md b/sn_client/README.md deleted file mode 100644 index 1e3d5a8259..0000000000 --- a/sn_client/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# `sn_client` - SAFE Network Client Library - -## Overview - -The `sn_client` library provides the core functionalities for interacting with the SAFE Network. It handles tasks such as connecting to the network, managing concurrency, and performing various network operations like data storage and retrieval. - -## Table of Contents - -- [Overview](#overview) -- [Installation](#installation) -- [Usage](#usage) - - [API Calls](#api-calls) -- [Running Tests](#running-tests) -- [Contributing](#contributing) - - [Conventional Commits](#conventional-commits) -- [License](#license) - -## Installation - -To include `sn_client` in your Rust project, add the following to your `Cargo.toml`: - -```toml -[dependencies] -sn_client = "latest_version_here" -``` - -## Usage - -To use `sn_client`, you first need to instantiate a client. Here's a simple example: - -```rust -use sn_client::Client; -let client = Client::new(signer, peers, req_response_timeout, custom_concurrency_limit).await?; -``` - -## Running Tests - -Prerequisites: -* A running local network. Refer to [`safe_network/README.md`](../README.md) to run a local test network. 
-* `SAFE_PEERS` environment variable or running the tests with `--feature=local-discovery`: - -```bash -$ cargo test --package sn_client --release --tests --features=local-discovery -``` - -## Contributing - -Please refer to the [Contributing Guidelines](../CONTRIBUTING.md) from the main directory for details on how to contribute to this project. - -### Conventional Commits - -We follow the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. Please adhere to this standard when contributing. - -## License - -This Safe Network repository is licensed under the General Public License (GPL), version 3 ([LICENSE](LICENSE) http://www.gnu.org/licenses/gpl-3.0.en.html). diff --git a/sn_client/src/acc_packet.rs b/sn_client/src/acc_packet.rs deleted file mode 100644 index 2d9570f34a..0000000000 --- a/sn_client/src/acc_packet.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::error::Result; -use bip39::Mnemonic; -use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey}; -use std::path::Path; - -pub mod user_secret; - -const DEFAULT_WALLET_DERIVIATION_PASSPHRASE: &str = "default"; - -/// Load a account from disk, with wallet, or create a new one using the mnemonic system -pub fn load_account_wallet_or_create_with_mnemonic( - root_dir: &Path, - derivation_passphrase: Option<&str>, -) -> Result { - let wallet = HotWallet::load_from(root_dir); - - match wallet { - Ok(wallet) => Ok(wallet), - Err(error) => { - warn!("Issue loading wallet, creating a new one: {error}"); - println!("Issue loading wallet from {root_dir:?}"); - - let mnemonic = load_or_create_mnemonic(root_dir)?; - let wallet = - secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?; - - Ok(HotWallet::create_from_key(root_dir, wallet, None)?) - } - } -} - -pub fn load_or_create_mnemonic(root_dir: &Path) -> Result { - match user_secret::read_mnemonic_from_disk(root_dir) { - Ok(mnemonic) => { - println!( - "Found existing mnemonic in {root_dir:?}, this will be used for key derivation." - ); - info!("Using existing mnemonic from {root_dir:?}"); - Ok(mnemonic) - } - Err(error) => { - println!("No existing mnemonic found, creating a new one in {root_dir:?}."); - warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}"); - let mnemonic = user_secret::random_eip2333_mnemonic()?; - user_secret::write_mnemonic_to_disk(root_dir, &mnemonic)?; - Ok(mnemonic) - } - } -} - -pub fn secret_key_from_mnemonic( - mnemonic: Mnemonic, - derivation_passphrase: Option, -) -> Result { - let passphrase = - derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned()); - user_secret::account_wallet_secret_key(mnemonic, &passphrase) -} - -pub fn create_faucet_account_and_wallet() -> HotWallet { - let root_dir = get_faucet_data_dir(); - - println!("Loading faucet wallet... 
{root_dir:#?}"); - load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .expect("Faucet wallet shall be created successfully.") -} diff --git a/sn_client/src/acc_packet/user_secret.rs b/sn_client/src/acc_packet/user_secret.rs deleted file mode 100644 index 800018cfb7..0000000000 --- a/sn_client/src/acc_packet/user_secret.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - error::{Error, Result}, - transfers::MainSecretKey, -}; -use bls::SecretKey; -use curv::elliptic::curves::ECScalar; -use rand::RngCore; -use std::path::Path; -use xor_name::XorName; - -const MNEMONIC_FILENAME: &str = "account_secret"; - -const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0"; - -const ACCOUNT_WALLET_DERIVATION: &str = "m/2/0"; - -pub fn random_eip2333_mnemonic() -> Result { - let mut entropy = [1u8; 32]; - let rng = &mut rand::rngs::OsRng; - rng.fill_bytes(&mut entropy); - let mnemonic = - bip39::Mnemonic::from_entropy(&entropy).map_err(|_error| Error::FailedToParseEntropy)?; - Ok(mnemonic) -} - -/// Derive a wallet secret key from the mnemonic for the account. 
-pub fn account_wallet_secret_key( - mnemonic: bip39::Mnemonic, - passphrase: &str, -) -> Result { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_WALLET_DERIVATION); - let key_bytes = derived_key.serialize(); - let sk = SecretKey::from_bytes(key_bytes.into()).map_err(|_err| Error::InvalidKeyBytes)?; - Ok(MainSecretKey::new(sk)) -} - -#[expect(dead_code)] // as yet unused, will be used soon -/// Derive an xorname from the mnemonic for the account to store data. -pub(crate) fn account_root_xorname(mnemonic: bip39::Mnemonic, passphrase: &str) -> Result { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_ROOT_XORNAME_DERIVATION); - let derived_key_bytes = derived_key.serialize(); - Ok(XorName::from_content(&derived_key_bytes)) -} - -pub fn write_mnemonic_to_disk(files_dir: &Path, mnemonic: &bip39::Mnemonic) -> Result<()> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = mnemonic.to_string(); - std::fs::write(filename, content)?; - Ok(()) -} - -pub(super) fn read_mnemonic_from_disk(files_dir: &Path) -> Result { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = std::fs::read_to_string(filename)?; - let mnemonic = - bip39::Mnemonic::parse_normalized(&content).map_err(|_err| Error::FailedToParseMnemonic)?; - Ok(mnemonic) -} diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs deleted file mode 100644 index b279ed9e31..0000000000 --- a/sn_client/src/api.rs +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - error::{Error, Result}, - Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver, ClientRegister, - WalletClient, -}; -use bls::{PublicKey, SecretKey, Signature}; -use libp2p::{ - identity::Keypair, - kad::{Quorum, Record}, - Multiaddr, PeerId, -}; -use rand::{thread_rng, Rng}; -use sn_networking::{ - get_signed_spend_from_record, multiaddr_is_global, - target_arch::{interval, spawn, timeout, Instant}, - GetRecordCfg, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg, VerificationKind, -}; -use sn_protocol::{ - error::Error as ProtocolError, - messages::ChunkProof, - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, - RecordKind, RegisterAddress, RetryStrategy, SpendAddress, - }, - NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -}; -use sn_registers::{Permissions, SignedRegister}; -use sn_transfers::{ - CashNote, CashNoteRedemption, MainPubkey, NanoTokens, Payment, SignedSpend, TransferError, - GENESIS_SPEND_UNIQUE_KEY, -}; -#[cfg(target_arch = "wasm32")] -use std::path::PathBuf; -use std::{ - collections::{HashMap, HashSet}, - num::NonZeroUsize, - sync::Arc, -}; -use tokio::time::Duration; -use tracing::trace; -use xor_name::XorName; - -/// The maximum duration the client will wait for a connection to the network before timing out. -pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); - -/// The timeout duration for the client to receive any response from the network. 
-const INACTIVITY_TIMEOUT: Duration = Duration::from_secs(30); - -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const QUORUM_N_IS_2: NonZeroUsize = match NonZeroUsize::new(2) { - Some(v) => v, - None => panic!("2 is not zero"), -}; - -impl Client { - /// A quick client with a random secret key and some peers. - pub async fn quick_start(peers: Option>) -> Result { - Self::new(SecretKey::random(), peers, None, None).await - } - - /// Instantiate a new client. - /// - /// Optionally specify the duration for the connection timeout. - /// - /// Defaults to [`CONNECTION_TIMEOUT`]. - /// - /// # Arguments - /// * 'signer' - [SecretKey] - /// * 'peers' - [Option]<[Vec]<[Multiaddr]>> - /// * 'connection_timeout' - [Option]<[Duration]> : Specification for client connection timeout set via Optional - /// * 'client_event_broadcaster' - [Option]<[ClientEventsBroadcaster]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn new( - signer: SecretKey, - peers: Option>, - connection_timeout: Option, - client_event_broadcaster: Option, - ) -> Result { - // If any of our contact peers has a global address, we'll assume we're in a global network. 
- let local = match peers { - Some(ref peers) => !peers.iter().any(multiaddr_is_global), - None => true, - }; - - info!("Startup a client with peers {peers:?} and local {local:?} flag"); - info!("Starting Kad swarm in client mode..."); - - #[cfg(target_arch = "wasm32")] - let root_dir = PathBuf::from("dummy path, wasm32/browser environments will not use this"); - #[cfg(not(target_arch = "wasm32"))] - let root_dir = std::env::temp_dir(); - trace!("Starting Kad swarm in client mode..{root_dir:?}."); - - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); - - let (network, mut network_event_receiver, swarm_driver) = network_builder.build_client()?; - info!("Client constructed network and swarm_driver"); - - // If the events broadcaster is not provided by the caller, then we create a new one. - // This is not optional as we wait on certain events to connect to the network and return from this function. - let events_broadcaster = client_event_broadcaster.unwrap_or_default(); - - let client = Self { - network: network.clone(), - events_broadcaster, - signer: Arc::new(signer), - }; - - // subscribe to our events channel first, so we don't have intermittent - // errors if it does not exist and we cannot send to it. 
- // (eg, if PeerAdded happens faster than our events channel is created) - let mut client_events_rx = client.events_channel(); - - let _swarm_driver = spawn({ - trace!("Starting up client swarm_driver"); - swarm_driver.run() - }); - - // spawn task to dial to the given peers - let network_clone = network.clone(); - let _handle = spawn(async move { - if let Some(peers) = peers { - for addr in peers { - trace!(%addr, "dialing initial peer"); - - if let Err(err) = network_clone.dial(addr.clone()).await { - tracing::error!(%addr, "Failed to dial: {err:?}"); - }; - } - } - }); - - // spawn task to wait for NetworkEvent and check for inactivity - let mut client_clone = client.clone(); - let _event_handler = spawn(async move { - let mut peers_added: usize = 0; - loop { - match timeout(INACTIVITY_TIMEOUT, network_event_receiver.recv()).await { - Ok(event) => { - let the_event = match event { - Some(the_event) => the_event, - None => { - error!("The `NetworkEvent` channel has been closed"); - continue; - } - }; - - let start = Instant::now(); - let event_string = format!("{the_event:?}"); - if let Err(err) = - client_clone.handle_network_event(the_event, &mut peers_added) - { - warn!("Error handling network event: {err}"); - } - trace!( - "Handled network event in {:?}: {:?}", - start.elapsed(), - event_string - ); - } - Err(_elapse_err) => { - debug!("Client inactivity... 
waiting for a network event"); - client_clone - .events_broadcaster - .broadcast(ClientEvent::InactiveClient(INACTIVITY_TIMEOUT)); - } - } - } - }); - - // loop to connect to the network - let mut is_connected = false; - let connection_timeout = connection_timeout.unwrap_or(CONNECTION_TIMEOUT); - let mut unsupported_protocol_tracker: Option<(String, String)> = None; - - debug!("Client connection timeout: {connection_timeout:?}"); - let mut connection_timeout_interval = interval(connection_timeout); - // first tick completes immediately - connection_timeout_interval.tick().await; - - loop { - tokio::select! { - _ = connection_timeout_interval.tick() => { - if !is_connected { - if let Some((our_protocol, their_protocols)) = unsupported_protocol_tracker { - error!("Timeout: Client could not connect to the network as it does not support the protocol"); - break Err(Error::UnsupportedProtocol(our_protocol, their_protocols)); - } - error!("Timeout: Client failed to connect to the network within {connection_timeout:?}"); - break Err(Error::ConnectionTimeout(connection_timeout)); - } - } - event = client_events_rx.recv() => { - match event { - // we do not error out directly as we might still connect if the other initial peers are from - // the correct network. - Ok(ClientEvent::PeerWithUnsupportedProtocol { our_protocol, their_protocol }) => { - warn!(%our_protocol, %their_protocol, "Client tried to connect to a peer with an unsupported protocol. 
Tracking the latest one"); - unsupported_protocol_tracker = Some((our_protocol, their_protocol)); - } - Ok(ClientEvent::ConnectedToNetwork) => { - is_connected = true; - info!("Client connected to the Network {is_connected:?}."); - break Ok(()); - } - Ok(ClientEvent::InactiveClient(timeout)) => { - if is_connected { - info!("The client was inactive for {timeout:?}."); - } else { - info!("The client still does not know enough network nodes."); - } - } - Err(err) => { - error!("Unexpected error during client startup {err:?}"); - println!("Unexpected error during client startup {err:?}"); - break Err(err.into()); - } - _ => {} - } - }} - }?; - - Ok(client) - } - - fn handle_network_event(&mut self, event: NetworkEvent, peers_added: &mut usize) -> Result<()> { - match event { - NetworkEvent::PeerAdded(peer_id, _connected_peer) => { - debug!("PeerAdded: {peer_id}"); - *peers_added += 1; - - // notify the listeners that we are waiting on CLOSE_GROUP_SIZE peers before emitting ConnectedToNetwork - self.events_broadcaster.broadcast(ClientEvent::PeerAdded { - max_peers_to_connect: CLOSE_GROUP_SIZE, - }); - // In case client running in non-local-discovery mode, - // it may take some time to fill up the RT. - // To avoid such delay may fail the query with RecordNotFound, - // wait till certain amount of peers populated into RT - if *peers_added >= CLOSE_GROUP_SIZE { - self.events_broadcaster - .broadcast(ClientEvent::ConnectedToNetwork); - } else { - debug!("{peers_added}/{CLOSE_GROUP_SIZE} initial peers found.",); - } - } - NetworkEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - } => { - self.events_broadcaster - .broadcast(ClientEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - }); - } - _other => {} - } - - Ok(()) - } - - /// Get the client events channel. 
- /// - /// Return Type: - /// - /// [ClientEventsReceiver] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error, ClientEvent}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Using client.events_channel() to publish messages - /// let mut events_channel = client.events_channel(); - /// while let Ok(event) = events_channel.recv().await { - /// // Handle the event - /// } - /// - /// # Ok(()) - /// # } - /// ``` - pub fn events_channel(&self) -> ClientEventsReceiver { - self.events_broadcaster.subscribe() - } - - /// Sign the given data. - /// - /// # Arguments - /// * 'data' - bytes; i.e bytes of an sn_registers::Register instance - /// - /// Return Type: - /// - /// [Signature] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::callsite::register; - /// use xor_name::XorName; - /// use sn_registers::Register; - /// use sn_protocol::messages::RegisterCmd; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// - /// // Set up register prerequisites - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let owner_sk = SecretKey::random(); - /// let owner_pk = owner_sk.public_key(); - /// - /// // set up register - /// let mut register = Register::new(owner_pk, xorname, Default::default()); - /// let mut register_clone = register.clone(); - /// - /// // Use of client.sign() with register through RegisterCmd::Create - /// let cmd = RegisterCmd::Create { - /// register, - /// signature: client.sign(register_clone.bytes()?), - /// }; - /// # Ok(()) - /// # } - /// ``` - pub fn sign>(&self, data: T) -> Signature { - self.signer.sign(data) - } - - /// Return a reference to the signer secret key. 
- /// - /// Return Type: - /// - /// [SecretKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let secret_key_reference = client.signer(); - /// # Ok(()) - /// # } - /// ``` - pub fn signer(&self) -> &SecretKey { - &self.signer - } - - /// Return the public key of the data signing key. - /// - /// Return Type: - /// - /// [PublicKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let public_key_reference = client.signer_pk(); - /// # Ok(()) - /// # } - /// ``` - pub fn signer_pk(&self) -> PublicKey { - self.signer.public_key() - } - - /// Set the signing key for this client. - /// - /// # Arguments - /// * 'sk' - [SecretKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let mut client = Client::new(SecretKey::random(), None, None, None).await?; - /// client.set_signer_key(SecretKey::random()); - /// # Ok(()) - /// # } - /// ``` - pub fn set_signer_key(&mut self, sk: SecretKey) { - self.signer = Arc::new(sk); - } - - /// Get a register from network - /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[SignedRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// // Set up a client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an 
address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Get a signed register - /// let signed_register = client.get_signed_register_from_network(address); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_signed_register_from_network( - &self, - address: RegisterAddress, - ) -> Result { - let key = NetworkAddress::from_register_address(address).to_record_key(); - - let maybe_records = self.network.get_register_record_from_network(key).await?; - merge_register_records(address, &maybe_records) - } - - /// Retrieve a Register from the network. - /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[ClientRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// // Set up a client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Get the register - /// let retrieved_register = client.get_register(address); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_register(&self, address: RegisterAddress) -> Result { - info!("Retrieving a Register replica at {address}"); - ClientRegister::retrieve(self.clone(), address).await - } - - /// Create a new Register on the Network. 
- /// Tops up payments and retries if necessary and verification failed - /// - /// # Arguments - /// * 'address' - [XorName] - /// * 'wallet_client' - [WalletClient] - /// * 'verify_store' - Boolean - /// * 'perms' - [Permissions] - /// - /// Return Type: - /// - /// Result<([ClientRegister], [NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{MainSecretKey}; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client, Wallet, etc - /// use sn_registers::Permissions; - /// use sn_transfers::HotWallet; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Example: - /// let (mut client_register, _storage_cost, _royalties_fees) = client - /// .create_and_pay_for_register( - /// xorname, - /// &mut wallet_client, - /// true, - /// Permissions::default(), - /// ) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create_and_pay_for_register( - &self, - address: XorName, - wallet_client: &mut WalletClient, - verify_store: bool, - perms: Permissions, - ) -> Result<(ClientRegister, NanoTokens, NanoTokens)> { - info!("Instantiating a new Register replica with address {address:?}"); - let (reg, mut total_cost, mut total_royalties) = ClientRegister::create_online( - self.clone(), - address, - wallet_client, - false, - perms.clone(), 
- ) - .await?; - - debug!("{address:?} Created in theory"); - let reg_address = reg.address(); - if verify_store { - debug!("We should verify stored at {address:?}"); - let mut stored = self.verify_register_stored(*reg_address).await.is_ok(); - - while !stored { - info!("Register not completely stored on the network yet. Retrying..."); - // this verify store call here ensures we get the record from Quorum::all - let (reg, top_up_cost, royalties_top_up) = ClientRegister::create_online( - self.clone(), - address, - wallet_client, - true, - perms.clone(), - ) - .await?; - let reg_address = reg.address(); - - total_cost = total_cost - .checked_add(top_up_cost) - .ok_or(Error::TotalPriceTooHigh)?; - total_royalties = total_cost - .checked_add(royalties_top_up) - .ok_or(Error::Wallet(sn_transfers::WalletError::from( - sn_transfers::TransferError::ExcessiveNanoValue, - )))?; - stored = self.verify_register_stored(*reg_address).await.is_ok(); - } - } - - Ok((reg, total_cost, total_royalties)) - } - - /// Store `Chunk` as a record. Protected method. 
- /// - /// # Arguments - /// * 'chunk' - [Chunk] - /// * 'payee' - [PeerId] - /// * 'payment' - [Payment] - /// * 'verify_store' - Boolean - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - /// - pub(super) async fn store_chunk( - &self, - chunk: Chunk, - payee: PeerId, - payment: Payment, - verify_store: bool, - retry_strategy: Option, - ) -> Result<()> { - info!("Store chunk: {:?}", chunk.address()); - let key = chunk.network_address().to_record_key(); - let retry_strategy = Some(retry_strategy.unwrap_or(RetryStrategy::Quick)); - - let record_kind = RecordKind::ChunkWithPayment; - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let verification = if verify_store { - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N(QUORUM_N_IS_2), - retry_strategy, - target_record: None, // Not used since we use ChunkProof - expected_holders: Default::default(), - }; - // The `ChunkWithPayment` is only used to send out via PutRecord. - // The holders shall only hold the `Chunk` copies. - // Hence the fetched copies shall only be a `Chunk` - - let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)?.to_vec(); - let random_nonce = thread_rng().gen::(); - let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); - - Some(( - VerificationKind::ChunkProof { - expected_proof, - nonce: random_nonce, - }, - verification_cfg, - )) - } else { - None - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy, - use_put_record_to: Some(vec![payee]), - verification, - }; - Ok(self.network.put_record(record, &put_cfg).await?) - } - - /// Get chunk from chunk address. 
- /// - /// # Arguments - /// * 'address' - [ChunkAddress] - /// * 'show_holders' - Boolean - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - /// - /// Return Type: - /// - /// Result<[Chunk]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_protocol::storage::ChunkAddress; - /// // client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // chunk address - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let chunk_address = ChunkAddress::new(xorname); - /// // get chunk - /// let chunk = client.get_chunk(chunk_address,true, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn get_chunk( - &self, - address: ChunkAddress, - show_holders: bool, - retry_strategy: Option, - ) -> Result { - info!("Getting chunk: {address:?}"); - let key = NetworkAddress::from_chunk_address(address).to_record_key(); - - let expected_holders = if show_holders { - let result: HashSet<_> = self - .network - .get_closest_peers(&NetworkAddress::from_chunk_address(address), true) - .await? - .iter() - .cloned() - .collect(); - result - } else { - Default::default() - }; - - let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: Some(retry_strategy.unwrap_or(RetryStrategy::Quick)), - target_record: None, - expected_holders, - }; - let record = self.network.get_record_from_network(key, &get_cfg).await?; - let header = RecordHeader::from_record(&record)?; - if let RecordKind::Chunk = header.kind { - let chunk: Chunk = try_deserialize_record(&record)?; - Ok(chunk) - } else { - Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into()) - } - } - - /// Verify if a `Chunk` is stored by expected nodes on the network. - /// Single local use. Marked Private. 
- pub async fn verify_chunk_stored(&self, chunk: &Chunk) -> Result<()> { - let address = chunk.network_address(); - info!("Verifying chunk: {address:?}"); - let random_nonce = thread_rng().gen::(); - let record_value = try_serialize_record(&chunk, RecordKind::Chunk)?; - let expected_proof = ChunkProof::new(record_value.as_ref(), random_nonce); - - if let Err(err) = self - .network - .verify_chunk_existence( - address.clone(), - random_nonce, - expected_proof, - Quorum::N(QUORUM_N_IS_2), - None, - ) - .await - { - error!("Failed to verify the existence of chunk {address:?} with err {err:?}"); - return Err(err.into()); - } - - Ok(()) - } - - /// Verify if a `Register` is stored by expected nodes on the network. - /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[SignedRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Verify address is stored - /// let is_stored = client.verify_register_stored(address).await.is_ok(); - /// # Ok(()) - /// # } - /// ``` - pub async fn verify_register_stored(&self, address: RegisterAddress) -> Result { - info!("Verifying register: {address:?}"); - self.get_signed_register_from_network(address).await - } - - /// Quickly checks if a `Register` is stored by expected nodes on the network. 
- /// - /// To be used for initial register put checks eg, if we expect the data _not_ - /// to exist, we can use it and essentially use the RetryStrategy::Quick under the hood - /// - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Verify address is stored - /// let is_stored = client.verify_register_stored(address).await.is_ok(); - /// # Ok(()) - /// # } - /// ``` - pub async fn quickly_check_if_register_stored( - &self, - address: RegisterAddress, - ) -> Result { - info!("Quickly checking for existing register : {address:?}"); - self.get_signed_register_from_network(address).await - } - - /// Send a `SpendCashNote` request to the network. Protected method. 
- /// - /// # Arguments - /// * 'spend' - [SignedSpend] - /// * 'verify_store' - Boolean - /// - pub(crate) async fn network_store_spend( - &self, - spend: SignedSpend, - verify_store: bool, - ) -> Result<()> { - let unique_pubkey = *spend.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let key = network_address.to_record_key(); - - let record_kind = RecordKind::Spend; - let record = Record { - key: key.clone(), - value: try_serialize_record(&[spend], record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let pretty_key = PrettyPrintRecordKey::from(&key); - info!("Sending spend {unique_pubkey:?} to the network via put_record, with addr of {cash_note_addr:?} - {pretty_key:?}, size of {}", - record.value.len()); - - let (record_to_verify, expected_holders) = if verify_store { - let expected_holders: HashSet<_> = self - .network - .get_closest_peers(&network_address, true) - .await? - .iter() - .cloned() - .collect(); - (Some(record.clone()), expected_holders) - } else { - (None, Default::default()) - }; - - // When there is retry on Put side, no need to have a retry on Get - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: record_to_verify, - expected_holders, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Persistent), - use_put_record_to: None, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - Ok(self.network.put_record(record, &put_cfg).await?) - } - - /// Get a spend from network. 
- /// - /// # Arguments - /// * 'address' - [SpendAddress] - /// - /// Return Type: - /// - /// Result<[SignedSpend]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_transfers::SpendAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Create a SpendAddress - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let spend_address = SpendAddress::new(xorname); - /// // Here we get the spend address - /// let spend = client.get_spend_from_network(spend_address).await?; - /// // Example: We can use the spend to get its unique public key: - /// let unique_pubkey = spend.unique_pubkey(); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_spend_from_network(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Balanced), - target_record: None, - expected_holders: Default::default(), - }, - ) - .await - } - - /// Try to peek a spend by just fetching one copy of it. - /// Useful to help decide whether a re-put is necessary, or a spend exists already - /// (client side verification). - pub async fn peek_a_spend(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: None, - target_record: None, - expected_holders: Default::default(), - }, - ) - .await - } - - /// This is a similar funcation to `get_spend_from_network` to get a spend from network. - /// Just using different `RetryStrategy` to improve the performance during crawling. 
- pub async fn crawl_spend_from_network(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: None, - expected_holders: Default::default(), - }, - ) - .await - } - - /// Try to confirm the Genesis spend doesn't present in the network yet. - /// It shall be quick, and any signle returned copy shall consider as error. - pub async fn is_genesis_spend_present(&self) -> bool { - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - self.peek_a_spend(genesis_addr).await.is_ok() - } - - async fn try_fetch_spend_from_network( - &self, - address: SpendAddress, - get_cfg: GetRecordCfg, - ) -> Result { - let key = NetworkAddress::from_spend_address(address).to_record_key(); - - info!( - "Getting spend at {address:?} with record_key {:?}", - PrettyPrintRecordKey::from(&key) - ); - let record = self - .network - .get_record_from_network(key.clone(), &get_cfg) - .await?; - info!( - "For spend at {address:?} got record from the network, {:?}", - PrettyPrintRecordKey::from(&record.key) - ); - - let signed_spend = get_signed_spend_from_record(&address, &record)?; - - // check addr - let spend_addr = SpendAddress::from_unique_pubkey(signed_spend.unique_pubkey()); - if address != spend_addr { - let s = format!("Spend got from the Network at {address:?} contains different spend address: {spend_addr:?}"); - warn!("{s}"); - return Err(Error::Transfer(TransferError::InvalidSpendValue( - *signed_spend.unique_pubkey(), - ))); - } - - // check spend - match signed_spend.verify() { - Ok(()) => { - trace!("Verified signed spend got from network for {address:?}"); - Ok(signed_spend.clone()) - } - Err(err) => { - warn!("Invalid signed spend got from network for {address:?}: {err:?}."); - Err(Error::CouldNotVerifyTransfer(format!( - "Verification failed for spent at {address:?} with error {err:?}" - ))) - } - } - } - - /// This function is used 
to receive a Vector of CashNoteRedemptions and turn them back into spendable CashNotes. - /// For this we need a network connection. - /// Verify CashNoteRedemptions and rebuild spendable currency from them. - /// Returns an `Error::InvalidTransfer` if any CashNoteRedemption is not valid - /// Else returns a list of CashNotes that can be spent by the owner. - /// - /// # Arguments - /// * 'main_pubkey' - [MainPubkey] - /// * 'cashnote_redemptions' - [CashNoteRedemption] - /// - /// Return Type: - /// - /// Result<[Vec]<[CashNote]>> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use sn_transfers::{CashNote, CashNoteRedemption, MainPubkey}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Create a main public key - /// let pk = SecretKey::random().public_key(); - /// let main_pub_key = MainPubkey::new(pk); - /// // Create a Cash Note Redemption Vector - /// let cash_note = CashNote::from_hex("&hex").unwrap(); - /// let cashNoteRedemption = CashNoteRedemption::from_cash_note(&cash_note); - /// let vector = vec![cashNoteRedemption.clone(), cashNoteRedemption.clone()]; - /// // Verify the cash note redemptions - /// let cash_notes = client.verify_cash_notes_redemptions(main_pub_key,&vector); - /// # Ok(()) - /// # } - /// ``` - pub async fn verify_cash_notes_redemptions( - &self, - main_pubkey: MainPubkey, - cashnote_redemptions: &[CashNoteRedemption], - ) -> Result> { - let cash_notes = self - .network - .verify_cash_notes_redemptions(main_pubkey, cashnote_redemptions) - .await?; - Ok(cash_notes) - } -} - -fn get_register_from_record(record: &Record) -> Result { - let header = RecordHeader::from_record(record)?; - - if let RecordKind::Register = header.kind { - let register = try_deserialize_record::(record)?; - Ok(register) - } else { - error!("RecordKind mismatch while trying to retrieve a signed 
register"); - Err(NetworkError::RecordKindMismatch(RecordKind::Register).into()) - } -} - -/// if multiple register records where found for a given key, merge them into a single register -fn merge_register_records( - address: RegisterAddress, - map: &HashMap, -) -> Result { - let key = NetworkAddress::from_register_address(address).to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&key); - info!( - "Got {} register records from the network for key: {pretty_key:?}", - map.len() - ); - let mut all_registers = vec![]; - for record in map.values() { - match get_register_from_record(record) { - Ok(r) => all_registers.push(r), - Err(e) => { - warn!("Ignoring invalid register record {pretty_key:?} with error {e:?}"); - continue; - } - } - } - - // get the first valid register - let one_valid_reg = if let Some(r) = all_registers.clone().iter().find(|r| r.verify().is_ok()) { - r.clone() - } else { - error!("No valid register records found for {key:?}"); - return Err(Error::Protocol(ProtocolError::RegisterNotFound(Box::new( - address, - )))); - }; - - // merge it with the others if they are valid - let register: SignedRegister = all_registers.into_iter().fold(one_valid_reg, |mut acc, r| { - if acc.verified_merge(&r).is_err() { - warn!("Skipping register that failed to merge. 
Entry found for {key:?}"); - } - acc - }); - - Ok(register) -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeSet; - - use sn_registers::Register; - - use super::*; - - #[test] - fn test_merge_register_records() -> eyre::Result<()> { - let mut rng = rand::thread_rng(); - let meta = XorName::random(&mut rng); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(meta, owner_pk); - - // prepare registers - let mut register_root = Register::new(owner_pk, meta, Default::default()); - let (root_hash, _) = - register_root.write(b"root_entry".to_vec(), &BTreeSet::default(), &owner_sk)?; - let root = BTreeSet::from_iter(vec![root_hash]); - let signed_root = register_root.clone().into_signed(&owner_sk)?; - - let mut register1 = register_root.clone(); - let (_hash, op1) = register1.write(b"entry1".to_vec(), &root, &owner_sk)?; - let mut signed_register1 = signed_root.clone(); - signed_register1.add_op(op1)?; - - let mut register2 = register_root.clone(); - let (_hash, op2) = register2.write(b"entry2".to_vec(), &root, &owner_sk)?; - let mut signed_register2 = signed_root; - signed_register2.add_op(op2)?; - - let mut register_bad = Register::new(owner_pk, meta, Default::default()); - let (_hash, _op_bad) = - register_bad.write(b"bad_root".to_vec(), &BTreeSet::default(), &owner_sk)?; - let invalid_sig = register2.sign(&owner_sk)?; // steal sig from something else - let signed_register_bad = SignedRegister::new(register_bad, invalid_sig); - - // prepare records - let record1 = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register1, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname1 = XorName::from_content(&record1.value); - let record2 = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register2, RecordKind::Register)?.to_vec(), - 
publisher: None, - expires: None, - }; - let xorname2 = XorName::from_content(&record2.value); - let record_bad = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register_bad, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname_bad = XorName::from_content(&record_bad.value); - - // test with 2 valid records: should return the two merged - let mut expected_merge = signed_register1.clone(); - expected_merge.merge(&signed_register2)?; - let map = HashMap::from_iter(vec![(xorname1, record1.clone()), (xorname2, record2)]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, expected_merge); - - // test with 1 valid record and 1 invalid record: should return the valid one - let map = HashMap::from_iter(vec![(xorname1, record1), (xorname2, record_bad.clone())]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, signed_register1); - - // test with 2 invalid records: should error out - let map = HashMap::from_iter(vec![ - (xorname_bad, record_bad.clone()), - (xorname_bad, record_bad), - ]); - let res = merge_register_records(address, &map); // Err - assert!(res.is_err()); - - Ok(()) - } -} diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs deleted file mode 100644 index fa00a5078f..0000000000 --- a/sn_client/src/audit/dag_crawling.rs +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use crate::{Client, Error, SpendDag}; - -use dashmap::DashMap; -use futures::{ - future::join_all, - stream::{self, StreamExt}, -}; -use sn_networking::{GetRecordError, NetworkError}; -use sn_transfers::{ - NanoTokens, SignedSpend, SpendAddress, SpendReason, UniquePubkey, WalletError, WalletResult, - DEFAULT_NETWORK_ROYALTIES_PK, GENESIS_SPEND_UNIQUE_KEY, NETWORK_ROYALTIES_PK, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::mpsc::Sender; - -const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; - -enum InternalGetNetworkSpend { - Spend(Box), - DoubleSpend(Vec), - NotFound, - Error(Error), -} - -impl Client { - pub async fn new_dag_with_genesis_only(&self) -> WalletResult { - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - let mut dag = SpendDag::new(genesis_addr); - match self.get_spend_from_network(genesis_addr).await { - Ok(spend) => { - dag.insert(genesis_addr, spend); - } - Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => { - println!("Burnt spend detected at Genesis: {genesis_addr:?}"); - warn!("Burnt spend detected at Genesis: {genesis_addr:?}"); - for (i, spend) in spends.into_iter().enumerate() { - let reason = spend.reason(); - let amount = spend.spend.amount(); - let ancestors_len = spend.spend.ancestors.len(); - let descendants_len = spend.spend.descendants.len(); - let roy_len = spend.spend.network_royalties().len(); - warn!( - "burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", - spend.spend.ancestors, spend.spend.descendants - ); - dag.insert(genesis_addr, spend); - } - } - Err(e) => return Err(WalletError::FailedToGetSpend(e.to_string())), - }; - - Ok(dag) - } - - /// Builds a SpendDag from a given SpendAddress recursively following descendants all the way to UTxOs - /// Started from Genesis this gives the entire SpendDag of the 
Network at a certain point in time - /// Once the DAG collected, optionally verifies and records errors in the DAG - /// - /// ```text - /// -> Spend7 ---> UTXO_11 - /// / - /// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10 - /// \ - /// ---> Spend3 ---> Spend6 ---> UTXO_9 - /// \ - /// -> Spend4 ---> UTXO_8 - /// - /// ``` - pub async fn spend_dag_build_from( - &self, - spend_addr: SpendAddress, - spend_processing: Option>, - verify: bool, - ) -> WalletResult { - let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE); - - // start crawling from the given spend address - let self_clone = self.clone(); - let crawl_handle = - tokio::spawn(async move { self_clone.spend_dag_crawl_from(spend_addr, tx).await }); - - // start DAG building from the spends gathered while crawling - // forward spends to processing if provided - let build_handle: tokio::task::JoinHandle> = - tokio::spawn(async move { - debug!("Starting building DAG from {spend_addr:?}..."); - let now = std::time::Instant::now(); - let mut dag = SpendDag::new(spend_addr); - while let Some(spend) = rx.recv().await { - let addr = spend.address(); - debug!( - "Inserting spend at {addr:?} size: {}", - dag.all_spends().len() - ); - dag.insert(addr, spend.clone()); - if let Some(sender) = &spend_processing { - let outputs = spend.spend.descendants.len() as u64; - sender - .send((spend, outputs, false)) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - } - info!( - "Done gathering DAG of size: {} in {:?}", - dag.all_spends().len(), - now.elapsed() - ); - Ok(dag) - }); - - // wait for both to finish - let (crawl_res, build_res) = tokio::join!(crawl_handle, build_handle); - crawl_res.map_err(|e| { - WalletError::SpendProcessing(format!("Failed to Join crawling results {e}")) - })??; - let mut dag = build_res.map_err(|e| { - WalletError::SpendProcessing(format!("Failed to Join DAG building results {e}")) - })??; - - // verify the DAG - if verify { - info!("Now 
verifying SpendDAG from {spend_addr:?} and recording errors..."); - let start = std::time::Instant::now(); - if let Err(e) = dag.record_faults(&dag.source()) { - let s = format!( - "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}" - ); - error!("{s}"); - return Err(WalletError::Dag(s)); - } - let elapsed = start.elapsed(); - info!("Finished verifying SpendDAG from {spend_addr:?} in {elapsed:?}"); - } - - Ok(dag) - } - - /// Get spends from a set of given SpendAddresses - /// Drain the addresses at the same layer first, then: - /// 1, return failed_utxos for re-attempt (with insertion time stamp) - /// 2, return fetched_address to avoid un-necessary re-attempts - /// 3, return addrs_for_further_track for further track - pub async fn crawl_to_next_utxos( - &self, - addrs_to_get: BTreeMap, - sender: Sender<(SignedSpend, u64, bool)>, - reattempt_seconds: u64, - ) -> ( - BTreeMap, - Vec, - BTreeSet<(SpendAddress, NanoTokens)>, - ) { - // max concurrency for the tasks of fetching records from network. 
- const MAX_CONCURRENT: usize = 64; - - let failed_utxos_arc: Arc> = Arc::new(DashMap::new()); - let addrs_for_further_track_arc: Arc> = Arc::new(DashMap::new()); - let fetched_addrs_arc: Arc> = Arc::new(DashMap::new()); - - stream::iter(addrs_to_get.into_iter()) - .map(|(addr, (failed_times, amount))| { - let client_clone = self.clone(); - let sender_clone = sender.clone(); - - let failed_utxos = Arc::clone(&failed_utxos_arc); - let addrs_for_further_track = Arc::clone(&addrs_for_further_track_arc); - let fetched_addrs = Arc::clone(&fetched_addrs_arc); - async move { - let result = client_clone.crawl_spend(addr).await; - - match result { - InternalGetNetworkSpend::Spend(spend) => { - let for_further_track = beta_track_analyze_spend(&spend); - let _ = sender_clone - .send((*spend, for_further_track.len() as u64, false)) - .await; - for entry in for_further_track { - let _ = addrs_for_further_track.insert(entry, ()); - } - fetched_addrs.insert(addr, ()); - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - warn!( - "Detected burnt spend regarding {addr:?} - {:?}", - spends.len() - ); - - for (i, spend) in spends.into_iter().enumerate() { - let reason = spend.reason(); - let amount = spend.spend.amount(); - let ancestors_len = spend.spend.ancestors.len(); - let descendants_len = spend.spend.descendants.len(); - let roy_len = spend.spend.network_royalties().len(); - warn!("burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", - spend.spend.ancestors, spend.spend.descendants); - } - fetched_addrs.insert(addr, ()); - } - InternalGetNetworkSpend::NotFound => { - let reattempt_interval = if amount.as_nano() > 100000 { - info!("Not find spend of big-UTXO {addr:?} with {amount}"); - reattempt_seconds - } else { - reattempt_seconds * (failed_times * 8 + 1) - }; - failed_utxos.insert( - addr, - ( - failed_times + 1, - Instant::now() + 
Duration::from_secs(reattempt_interval), - amount, - ), - ); - } - InternalGetNetworkSpend::Error(e) => { - warn!("Fetching spend {addr:?} with {amount:?} result in error {e:?}"); - // Error of `NotEnoughCopies` could be re-attempted and succeed eventually. - failed_utxos.insert( - addr, - ( - failed_times + 1, - Instant::now() + Duration::from_secs(reattempt_seconds), - amount, - ), - ); - } - } - - (addr, amount) - } - }) - .buffer_unordered(MAX_CONCURRENT) - .for_each(|(address, amount)| async move { - info!("Completed fetching attempt of {address:?} with amount {amount:?}"); - }) - .await; - - let mut failed_utxos_result = BTreeMap::new(); - for entry in failed_utxos_arc.iter() { - let key = entry.key(); - let val = entry.value(); - let _ = failed_utxos_result.insert(*key, *val); - } - - let mut fetched_addrs = Vec::new(); - for entry in fetched_addrs_arc.iter() { - let key = entry.key(); - fetched_addrs.push(*key); - } - - let mut addrs_for_further_track = BTreeSet::new(); - for entry in addrs_for_further_track_arc.iter() { - let key = entry.key(); - let _ = addrs_for_further_track.insert(*key); - } - - (failed_utxos_result, fetched_addrs, addrs_for_further_track) - } - - /// Crawls the Spend Dag from a given SpendAddress recursively - /// following descendants all the way to UTXOs - /// Returns the UTXOs reached - pub async fn spend_dag_crawl_from( - &self, - spend_addr: SpendAddress, - spend_processing: Sender, - ) -> WalletResult> { - info!("Crawling spend DAG from {spend_addr:?}"); - let mut utxos = BTreeSet::new(); - - // get first spend - let mut descendants_to_follow = match self.crawl_spend(spend_addr).await { - InternalGetNetworkSpend::Spend(spend) => { - let spend = *spend; - let descendants_to_follow = spend.spend.descendants.clone(); - - spend_processing - .send(spend) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - descendants_to_follow - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - let mut descendants_to_follow 
= BTreeMap::new(); - for spend in spends.into_iter() { - descendants_to_follow.extend(spend.spend.descendants.clone()); - spend_processing - .send(spend) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - descendants_to_follow - } - InternalGetNetworkSpend::NotFound => { - // the cashnote was not spent yet, so it's an UTXO - info!("UTXO at {spend_addr:?}"); - utxos.insert(spend_addr); - return Ok(utxos); - } - InternalGetNetworkSpend::Error(e) => { - return Err(WalletError::FailedToGetSpend(e.to_string())); - } - }; - - // use iteration instead of recursion to avoid stack overflow - let mut known_descendants: BTreeSet = BTreeSet::new(); - let mut gen: u32 = 0; - let start = std::time::Instant::now(); - - while !descendants_to_follow.is_empty() { - let mut next_gen_descendants = BTreeMap::new(); - - // list up all descendants - let mut addrs = vec![]; - for (descendant, _amount) in descendants_to_follow.iter() { - let addrs_to_follow = SpendAddress::from_unique_pubkey(descendant); - info!("Gen {gen} - Following descendant : {descendant:?}"); - addrs.push(addrs_to_follow); - } - - // get all spends in parallel - let mut stream = futures::stream::iter(addrs.clone()) - .map(|a| async move { (self.crawl_spend(a).await, a) }) - .buffer_unordered(crate::MAX_CONCURRENT_TASKS); - info!( - "Gen {gen} - Getting {} spends from {} txs in batches of: {}", - addrs.len(), - descendants_to_follow.len(), - crate::MAX_CONCURRENT_TASKS, - ); - - // insert spends in the dag as they are collected - while let Some((get_spend, addr)) = stream.next().await { - match get_spend { - InternalGetNetworkSpend::Spend(spend) => { - next_gen_descendants.extend(spend.spend.descendants.clone()); - spend_processing - .send(*spend.clone()) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - info!("Fetched double spend(s) of len {} at {addr:?} from network, following all of them.", spends.len()); - for 
s in spends.into_iter() { - next_gen_descendants.extend(s.spend.descendants.clone()); - spend_processing - .send(s.clone()) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - } - InternalGetNetworkSpend::NotFound => { - info!("Reached UTXO at {addr:?}"); - utxos.insert(addr); - } - InternalGetNetworkSpend::Error(err) => { - error!("Failed to get spend at {addr:?} during DAG collection: {err:?}") - } - } - } - - // only follow descendants we haven't already gathered - let followed_descendants: BTreeSet = - descendants_to_follow.keys().copied().collect(); - known_descendants.extend(followed_descendants); - descendants_to_follow = next_gen_descendants - .into_iter() - .filter(|(key, _)| !known_descendants.contains(key)) - .collect(); - - // go on to next gen - gen += 1; - } - - let elapsed = start.elapsed(); - info!("Finished crawling SpendDAG from {spend_addr:?} in {elapsed:?}"); - Ok(utxos) - } - - /// Extends an existing SpendDag with a new SignedSpend, - /// tracing back the ancestors of that Spend all the way to a known Spend in the DAG or else back to Genesis - /// Verifies the DAG and records faults if any - /// This is useful to keep a partial SpendDag to be able to verify that new spends come from Genesis - /// - /// ```text - /// ... -- - /// \ - /// ... ---- ... -- - /// \ \ - /// Spend0 -> Spend1 -----> Spend2 ---> Spend5 ---> Spend2 ---> Genesis - /// \ / - /// ---> Spend3 ---> Spend6 -> - /// \ / - /// -> Spend4 -> - /// / - /// ... 
- /// - /// ``` - pub async fn spend_dag_extend_until( - &self, - dag: &mut SpendDag, - spend_addr: SpendAddress, - new_spend: SignedSpend, - ) -> WalletResult<()> { - // check existence of spend in dag - let is_new_spend = dag.insert(spend_addr, new_spend.clone()); - if !is_new_spend { - return Ok(()); - } - - // use iteration instead of recursion to avoid stack overflow - let mut ancestors_to_verify = new_spend.spend.ancestors.clone(); - let mut depth = 0; - let mut known_ancestors = BTreeSet::from_iter([dag.source()]); - let start = std::time::Instant::now(); - - while !ancestors_to_verify.is_empty() { - let mut next_gen_ancestors = BTreeSet::new(); - - for ancestor in ancestors_to_verify { - let addrs_to_verify = vec![SpendAddress::from_unique_pubkey(&ancestor)]; - debug!("Depth {depth} - checking parent : {ancestor:?} - {addrs_to_verify:?}"); - - // get all parent spends in parallel - let tasks: Vec<_> = addrs_to_verify - .iter() - .map(|a| self.crawl_spend(*a)) - .collect(); - let mut spends = BTreeSet::new(); - for (spend_get, a) in join_all(tasks) - .await - .into_iter() - .zip(addrs_to_verify.clone()) - { - match spend_get { - InternalGetNetworkSpend::Spend(s) => { - spends.insert(*s); - } - InternalGetNetworkSpend::DoubleSpend(s) => { - spends.extend(s.into_iter()); - } - InternalGetNetworkSpend::NotFound => { - return Err(WalletError::FailedToGetSpend(format!( - "Missing ancestor spend at {a:?}" - ))) - } - InternalGetNetworkSpend::Error(e) => { - return Err(WalletError::FailedToGetSpend(format!( - "Failed to get ancestor spend at {a:?}: {e}" - ))) - } - } - } - let spends_len = spends.len(); - debug!("Depth {depth} - Got {spends_len} spends for parent: {addrs_to_verify:?}"); - trace!("Spends for {addrs_to_verify:?} - {spends:?}"); - - // add spends to the dag - known_ancestors.extend(addrs_to_verify.clone()); - for (spend, addr) in spends.clone().into_iter().zip(addrs_to_verify) { - let is_new_spend = dag.insert(addr, spend.clone()); - - // no need to 
check this spend's parents if it was already in the DAG - if is_new_spend { - next_gen_ancestors.extend(spend.spend.ancestors.clone()); - } - } - } - - // only verify parents we haven't already verified - ancestors_to_verify = next_gen_ancestors - .into_iter() - .filter(|ancestor| { - !known_ancestors.contains(&SpendAddress::from_unique_pubkey(ancestor)) - }) - .collect(); - - depth += 1; - let elapsed = start.elapsed(); - let n = known_ancestors.len(); - info!("Now at depth {depth} - Collected spends from {n} transactions in {elapsed:?}"); - } - - let elapsed = start.elapsed(); - let n = known_ancestors.len(); - info!("Collected the DAG branch all the way to known spends or genesis! Through {depth} generations, collecting spends from {n} transactions in {elapsed:?}"); - - // verify the DAG - info!("Now verifying SpendDAG extended at {spend_addr:?} and recording errors..."); - let start = std::time::Instant::now(); - if let Err(e) = dag.record_faults(&dag.source()) { - let s = format!( - "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}" - ); - error!("{s}"); - return Err(WalletError::Dag(s)); - } - let elapsed = start.elapsed(); - info!("Finished verifying SpendDAG extended from {spend_addr:?} in {elapsed:?}"); - Ok(()) - } - - /// Extends an existing SpendDag starting from the given utxos - /// If verify is true, records faults in the DAG - pub async fn spend_dag_continue_from( - &self, - dag: &mut SpendDag, - utxos: BTreeSet, - spend_processing: Option>, - verify: bool, - ) { - let main_dag_src = dag.source(); - info!( - "Expanding spend DAG with source: {main_dag_src:?} from {} utxos", - utxos.len() - ); - - let sender = spend_processing.clone(); - let tasks = utxos - .iter() - .map(|utxo| self.spend_dag_build_from(*utxo, sender.clone(), false)); - let sub_dags = join_all(tasks).await; - for (res, addr) in sub_dags.into_iter().zip(utxos.into_iter()) { - match res { - Ok(sub_dag) => { - debug!("Gathered sub DAG from: {addr:?}"); - 
if let Err(e) = dag.merge(sub_dag, verify) { - warn!("Failed to merge sub dag from {addr:?} into dag: {e}"); - } - } - Err(e) => warn!("Failed to gather sub dag from {addr:?}: {e}"), - }; - } - - info!("Done gathering spend DAG from utxos"); - } - - /// Extends an existing SpendDag starting from the utxos in this DAG - /// Covers the entirety of currently existing Spends if the DAG was built from Genesis - /// If verify is true, records faults in the DAG - /// Stops gathering after max_depth generations - pub async fn spend_dag_continue_from_utxos( - &self, - dag: &mut SpendDag, - spend_processing: Option>, - verify: bool, - ) { - let utxos = dag.get_utxos(); - self.spend_dag_continue_from(dag, utxos, spend_processing, verify) - .await - } - - /// Internal get spend helper for DAG purposes - /// For crawling, a special fetch policy is deployed to improve the performance: - /// 1. Expect `majority` copies as it is a `Spend`; - /// 2. But don't retry as most will be `UTXO` which won't be found. - async fn crawl_spend(&self, spend_addr: SpendAddress) -> InternalGetNetworkSpend { - match self.crawl_spend_from_network(spend_addr).await { - Ok(s) => { - debug!("DAG crawling: fetched spend {spend_addr:?} from network"); - InternalGetNetworkSpend::Spend(Box::new(s)) - } - Err(Error::Network(NetworkError::GetRecordError(GetRecordError::RecordNotFound))) => { - debug!("DAG crawling: spend at {spend_addr:?} not found on the network"); - InternalGetNetworkSpend::NotFound - } - Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => { - debug!("DAG crawling: got a double spend(s) of len {} at {spend_addr:?} on the network", spends.len()); - InternalGetNetworkSpend::DoubleSpend(spends) - } - Err(e) => { - debug!( - "DAG crawling: got an error for spend at {spend_addr:?} on the network: {e}" - ); - InternalGetNetworkSpend::Error(e) - } - } - } -} - -/// Helper function to analyze spend for beta_tracking optimization. 
-/// returns the new_utxos that needs to be further tracked. -fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet<(SpendAddress, NanoTokens)> { - // Filter out royalty outputs - let royalty_pubkeys: BTreeSet<_> = spend - .spend - .network_royalties() - .iter() - .map(|(_, _, der)| NETWORK_ROYALTIES_PK.new_unique_pubkey(der)) - .collect(); - let default_royalty_pubkeys: BTreeSet<_> = spend - .spend - .network_royalties() - .iter() - .map(|(_, _, der)| DEFAULT_NETWORK_ROYALTIES_PK.new_unique_pubkey(der)) - .collect(); - - let spend_addr = spend.address(); - let new_utxos: BTreeSet<_> = spend - .spend - .descendants - .iter() - .filter_map(|(unique_pubkey, amount)| { - if default_royalty_pubkeys.contains(unique_pubkey) - || royalty_pubkeys.contains(unique_pubkey) - { - None - } else { - let addr = SpendAddress::from_unique_pubkey(unique_pubkey); - - if amount.as_nano() > 100000 { - info!("Spend {spend_addr:?} has a big-UTXO {addr:?} with {amount}"); - } - - Some((addr, *amount)) - } - }) - .collect(); - - if let SpendReason::BetaRewardTracking(_) = spend.reason() { - // Do not track down forwarded payment further - Default::default() - } else { - trace!( - "Spend {spend_addr:?} original has {} outputs, tracking {} of them.", - spend.spend.descendants.len(), - new_utxos.len() - ); - new_utxos - } -} diff --git a/sn_client/src/audit/dag_error.rs b/sn_client/src/audit/dag_error.rs deleted file mode 100644 index 6fb79953fd..0000000000 --- a/sn_client/src/audit/dag_error.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use serde::{Deserialize, Serialize}; -use sn_transfers::SpendAddress; -use thiserror::Error; - -/// Errors that mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum DagError { - #[error("DAG has no source at {0:?}")] - MissingSource(SpendAddress), - #[error("DAG is incoherent at {0:?}: {1}")] - IncoherentDag(SpendAddress, String), - #[error("DAG with root {0:?} contains a cycle")] - DagContainsCycle(SpendAddress), -} - -/// List of possible faults that can be found in the DAG during verification -/// This indicates a certain spend is invalid and the reason for it -/// but does not mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum SpendFault { - #[error("Double Spend at {0:?}")] - DoubleSpend(SpendAddress), - #[error("Spend at {addr:?} has a missing ancestor at {ancestor:?}, until this ancestor is added to the DAG, it cannot be verified")] - MissingAncestry { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error( - "Spend at {addr:?} has a double spent ancestor at {ancestor:?}, making it unspendable" - )] - DoubleSpentAncestor { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error("Invalid transaction for spend at {0:?}: {1}")] - InvalidTransaction(SpendAddress, String), - #[error("Poisoned ancestry for spend at {0:?}: {1}")] - PoisonedAncestry(SpendAddress, String), - #[error("Spend at {addr:?} does not descend from given source: {src:?}")] - OrphanSpend { - addr: SpendAddress, - src: SpendAddress, - }, -} - -impl DagError { - pub fn spend_address(&self) -> SpendAddress { - match self { - DagError::MissingSource(addr) - | DagError::IncoherentDag(addr, _) - | DagError::DagContainsCycle(addr) => *addr, - } - } -} - -impl SpendFault { - pub fn 
spend_address(&self) -> SpendAddress { - match self { - SpendFault::DoubleSpend(addr) - | SpendFault::MissingAncestry { addr, .. } - | SpendFault::DoubleSpentAncestor { addr, .. } - | SpendFault::InvalidTransaction(addr, _) - | SpendFault::PoisonedAncestry(addr, _) - | SpendFault::OrphanSpend { addr, .. } => *addr, - } - } -} diff --git a/sn_client/src/audit/spend_dag.rs b/sn_client/src/audit/spend_dag.rs deleted file mode 100644 index fbf00bd947..0000000000 --- a/sn_client/src/audit/spend_dag.rs +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::SecretKey; -use petgraph::dot::Dot; -use petgraph::graph::{DiGraph, NodeIndex}; -use petgraph::visit::EdgeRef; -use serde::{Deserialize, Serialize}; -use sn_transfers::{ - is_genesis_spend, CashNoteRedemption, DerivationIndex, Hash, NanoTokens, SignedSpend, - SpendAddress, UniquePubkey, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - path::Path, -}; - -use super::dag_error::{DagError, SpendFault}; - -/// A DAG representing the spends from a specific Spend all the way to the UTXOs. -/// Starting from Genesis, this would encompass all the spends that have happened on the network -/// at a certain point in time. 
-/// -/// ```text -/// -> Spend7 ---> UTXO_11 -/// / -/// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10 -/// \ -/// ---> Spend3 ---> Spend6 ---> UTXO_9 -/// \ -/// -> Spend4 ---> UTXO_8 -/// -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SpendDag { - /// A directed graph of spend addresses - dag: DiGraph, - /// All the spends refered to in the dag indexed by their SpendAddress - spends: BTreeMap, - /// The source of the DAG (aka Genesis) - source: SpendAddress, - /// Recorded faults in the DAG - faults: BTreeMap>, -} - -type DagIndex = usize; - -/// Internal Dag entry type -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] -enum DagEntry { - NotGatheredYet(DagIndex), - DoubleSpend(Vec<(SignedSpend, DagIndex)>), - Spend(Box, DagIndex), -} - -impl DagEntry { - fn indexes(&self) -> Vec { - match self { - DagEntry::NotGatheredYet(idx) => vec![*idx], - DagEntry::DoubleSpend(spends) => spends.iter().map(|(_, idx)| *idx).collect(), - DagEntry::Spend(_, idx) => vec![*idx], - } - } - - fn spends(&self) -> Vec<&SignedSpend> { - match self { - DagEntry::Spend(spend, _) => vec![&**spend], - DagEntry::DoubleSpend(spends) => spends.iter().map(|(s, _)| s).collect(), - DagEntry::NotGatheredYet(_) => vec![], - } - } -} - -/// The result of a get operation on the DAG -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] -pub enum SpendDagGet { - /// Spend does not exist in the DAG - SpendNotFound, - /// Spend key is refered to by known spends but does not exist in the DAG yet - Utxo, - /// Spend is a double spend - DoubleSpend(Vec), - /// Spend is in the DAG - Spend(Box), -} - -impl SpendDag { - /// Create a new DAG with a given source - pub fn new(source: SpendAddress) -> Self { - Self { - dag: DiGraph::new(), - spends: BTreeMap::new(), - source, - faults: BTreeMap::new(), - } - } - - pub fn source(&self) -> SpendAddress { - self.source - } - - pub fn load_from_file>(path: P) -> crate::Result { - let bytes = 
std::fs::read(path)?; - let dag: SpendDag = rmp_serde::from_slice(&bytes)?; - Ok(dag) - } - - pub fn dump_to_file>(&self, path: P) -> crate::Result<()> { - let bytes = rmp_serde::to_vec(&self)?; - std::fs::write(path, bytes)?; - Ok(()) - } - - /// Insert a spend into the dag - /// Creating edges (links) from its ancestors and to its descendants - /// If the inserted spend is already known, it will be ignored - /// If the inserted spend is a double spend, it will be saved along with the previous spend - /// Return true if the spend was inserted and false if it was already in the DAG - pub fn insert(&mut self, spend_addr: SpendAddress, spend: SignedSpend) -> bool { - let existing_entry = self.spends.get(&spend_addr).cloned(); - let new_node_idx = match existing_entry { - // add new spend to the DAG - None => { - let node_idx = self.dag.add_node(spend_addr); - self.spends.insert( - spend_addr, - DagEntry::Spend(Box::new(spend.clone()), node_idx.index()), - ); - node_idx - } - // or upgrade a known but not gathered entry to spend - Some(DagEntry::NotGatheredYet(idx)) => { - self.spends - .insert(spend_addr, DagEntry::Spend(Box::new(spend.clone()), idx)); - let node_idx = NodeIndex::new(idx); - self.remove_all_edges(node_idx); - node_idx - } - // or upgrade spend to double spend if it is different from the existing one - Some(DagEntry::Spend(s, idx)) => { - let existing_spend = *s.clone(); - if existing_spend == spend { - return false; - } - - let node_idx = self.dag.add_node(spend_addr); - let double_spend = DagEntry::DoubleSpend(vec![ - (existing_spend.clone(), idx), - (spend.clone(), node_idx.index()), - ]); - self.spends.insert(spend_addr, double_spend); - node_idx - } - // or add extra spend to an existing double spend if it is unknown yet - Some(DagEntry::DoubleSpend(vec_s)) => { - if vec_s.iter().any(|(s, _idx)| s == &spend) { - return false; - } - - let node_idx = self.dag.add_node(spend_addr); - let mut vec_s = vec_s.clone(); - vec_s.push((spend.clone(), 
node_idx.index())); - self.spends.insert(spend_addr, DagEntry::DoubleSpend(vec_s)); - node_idx - } - }; - - // link to descendants - for (descendant, amount) in spend.spend.descendants.iter() { - let descendant_addr = SpendAddress::from_unique_pubkey(descendant); - - // add descendant if not already in dag - let spends_at_addr = self.spends.entry(descendant_addr).or_insert_with(|| { - let node_idx = self.dag.add_node(descendant_addr); - DagEntry::NotGatheredYet(node_idx.index()) - }); - - // link to descendant - for idx in spends_at_addr.indexes() { - let descendant_idx = NodeIndex::new(idx); - self.dag.update_edge(new_node_idx, descendant_idx, *amount); - } - } - - // do not link to ancestors if the spend is the source - if spend_addr == self.source { - return true; - } - - // link to ancestors - const PENDING_AMOUNT: NanoTokens = NanoTokens::from(0); - for ancestor in spend.spend.ancestors.iter() { - let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor); - - // add ancestor if not already in dag - let spends_at_addr = self.spends.entry(ancestor_addr).or_insert_with(|| { - let node_idx = self.dag.add_node(ancestor_addr); - DagEntry::NotGatheredYet(node_idx.index()) - }); - - // link to ancestor - match spends_at_addr { - DagEntry::NotGatheredYet(idx) => { - let ancestor_idx = NodeIndex::new(*idx); - self.dag - .update_edge(ancestor_idx, new_node_idx, PENDING_AMOUNT); - } - DagEntry::Spend(ancestor_spend, idx) => { - let ancestor_idx = NodeIndex::new(*idx); - let ancestor_given_amount = ancestor_spend - .spend - .descendants - .iter() - .find(|(descendant, _amount)| **descendant == spend.spend.unique_pubkey) - .map(|(_descendant, amount)| *amount) - .unwrap_or(PENDING_AMOUNT); - self.dag - .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount); - } - DagEntry::DoubleSpend(multiple_ancestors) => { - for (ancestor_spend, ancestor_idx) in multiple_ancestors { - if ancestor_spend - .spend - .descendants - .contains_key(spend.unique_pubkey()) - { - let 
ancestor_idx = NodeIndex::new(*ancestor_idx); - let ancestor_given_amount = ancestor_spend - .spend - .descendants - .iter() - .find(|(descendant, _amount)| { - **descendant == spend.spend.unique_pubkey - }) - .map(|(_descendant, amount)| *amount) - .unwrap_or(PENDING_AMOUNT); - self.dag - .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount); - } - } - } - } - } - - true - } - - /// Get spend addresses that probably exist as they are refered to by spends we know, - /// but we haven't gathered them yet - /// This includes UTXOs and unknown ancestors - pub fn get_pending_spends(&self) -> BTreeSet { - self.spends - .iter() - .filter_map(|(addr, entry)| match entry { - DagEntry::NotGatheredYet(_) => Some(*addr), - _ => None, - }) - .collect() - } - - /// Get the UTXOs: all the addresses that are refered to as children by other spends - /// but that don't have children themselves. - /// Those will eventually exist on the Network as the address is spent by their owners. - pub fn get_utxos(&self) -> BTreeSet { - let mut leaves = BTreeSet::new(); - for node_index in self.dag.node_indices() { - if !self - .dag - .neighbors_directed(node_index, petgraph::Direction::Outgoing) - .any(|_| true) - { - let utxo_addr = self.dag[node_index]; - leaves.insert(utxo_addr); - } - } - leaves - } - - pub fn dump_dot_format(&self) -> String { - format!("{:?}", Dot::with_config(&self.dag, &[])) - } - - pub fn dump_payment_forward_statistics(&self, sk: &SecretKey) -> String { - let mut statistics: BTreeMap> = Default::default(); - - let mut hash_dictionary: BTreeMap = Default::default(); - - // The following three is used in the memcheck test script. - // Update whenever these three got changed in the script. 
- let bootstrap_string = "bootstrap".to_string(); - let restart_string = "restart".to_string(); - let restarted_string = "restarted".to_string(); - let _ = hash_dictionary.insert(Hash::hash(bootstrap_string.as_bytes()), bootstrap_string); - let _ = hash_dictionary.insert(Hash::hash(restart_string.as_bytes()), restart_string); - let _ = hash_dictionary.insert(Hash::hash(restarted_string.as_bytes()), restarted_string); - for i in 0..50 { - let node_string = format!("node_{i}"); - let _ = hash_dictionary.insert(Hash::hash(node_string.as_bytes()), node_string); - } - - for spend_dag_entry in self.spends.values() { - if let DagEntry::Spend(signed_spend, _) = spend_dag_entry { - if let Some(sender_hash) = signed_spend.spend.reason.decrypt_discord_cypher(sk) { - let sender = if let Some(readable_sender) = hash_dictionary.get(&sender_hash) { - readable_sender.clone() - } else { - format!("{sender_hash:?}") - }; - let holders = statistics.entry(sender).or_default(); - holders.push(signed_spend.spend.amount()); - } - } - } - - let mut content = "Sender, Times, Amount".to_string(); - for (sender, payments) in statistics.iter() { - let total_amount: u64 = payments - .iter() - .map(|nano_tokens| nano_tokens.as_nano()) - .sum(); - content = format!("{content}\n{sender}, {}, {total_amount}", payments.len()); - } - content - } - - /// Merges the given dag into ours, optionally recomputing the faults after merge - /// If verify is set to false, the faults will not be computed, this can be useful when batching merges to avoid re-verifying - /// be sure to manually verify afterwards - pub fn merge(&mut self, sub_dag: SpendDag, verify: bool) -> Result<(), DagError> { - let source = self.source(); - info!( - "Merging sub DAG starting at {:?} into our DAG with source {:?}", - sub_dag.source(), - source - ); - for (addr, spends) in sub_dag.spends { - // only add spends to the dag, ignoring utxos and not yet gathered relatives - // utxos will be added automatically as their ancestors are 
added - // edges are updated by the insert method - match spends { - DagEntry::NotGatheredYet(_) => continue, - DagEntry::DoubleSpend(spends) => { - for (spend, _) in spends { - self.insert(addr, spend); - } - } - DagEntry::Spend(spend, _) => { - self.insert(addr, *spend); - } - } - } - - // recompute faults - if verify { - self.record_faults(&source)?; - } - - Ok(()) - } - - /// Get the spend at a given address - pub fn get_spend(&self, addr: &SpendAddress) -> SpendDagGet { - match self.spends.get(addr) { - None => SpendDagGet::SpendNotFound, - Some(DagEntry::NotGatheredYet(_)) => SpendDagGet::Utxo, - Some(DagEntry::DoubleSpend(spends)) => { - SpendDagGet::DoubleSpend(spends.iter().map(|(s, _)| s.clone()).collect()) - } - Some(DagEntry::Spend(spend, _)) => SpendDagGet::Spend(spend.clone()), - } - } - - /// Get the recorded faults if any for a given spend address - pub fn get_spend_faults(&self, addr: &SpendAddress) -> BTreeSet { - self.faults.get(addr).cloned().unwrap_or_default() - } - - /// Helper to get underlying index of spend entry in the DAG - /// This unstable API is used to access the underlying graph for testing purposes - /// An empty vec is returned if the spend is not in the DAG - pub fn get_spend_indexes(&self, addr: &SpendAddress) -> Vec { - self.spends - .get(addr) - .map(|spends| spends.indexes()) - .unwrap_or_default() - } - - /// Get all spends from the DAG - pub fn all_spends(&self) -> Vec<&SignedSpend> { - self.spends - .values() - .flat_map(|entry| entry.spends()) - .collect() - } - - /// Get the faults recorded in the DAG - pub fn faults(&self) -> &BTreeMap> { - &self.faults - } - - /// Get all royalties from the DAG - pub fn all_royalties(&self) -> crate::Result> { - let spends = self.all_spends(); - let mut royalties_by_unique_pk: BTreeMap< - UniquePubkey, - Vec<(DerivationIndex, SpendAddress)>, - > = BTreeMap::new(); - for s in spends { - let parent_spend_addr = SpendAddress::from_unique_pubkey(&s.spend.unique_pubkey); - for (roy_pk, _, 
derivation_idx) in s.spend.network_royalties() { - royalties_by_unique_pk - .entry(roy_pk) - .and_modify(|v| v.push((derivation_idx, parent_spend_addr))) - .or_insert(vec![(derivation_idx, parent_spend_addr)]); - } - } - - // assemble those and check - let mut royalties = vec![]; - for (unique_pk, vec) in royalties_by_unique_pk.into_iter() { - let parents_spend_addrs = vec.iter().map(|(_di, spend_addr)| *spend_addr).collect(); - let derivation_idx_uniq: BTreeSet<_> = - vec.iter().map(|(di, _spend_addr)| *di).collect(); - let idx_vec: Vec<_> = derivation_idx_uniq.into_iter().collect(); - let derivation_index = match idx_vec.as_slice() { - [one_unique] => *one_unique, - _ => { - warn!("DerivationIndex in single royalty output for {unique_pk:?} should have been unique, found parents and reported derivation index {vec:?}"); - continue; - } - }; - royalties.push(CashNoteRedemption::new( - derivation_index, - parents_spend_addrs, - )) - } - Ok(royalties) - } - - /// Remove all edges from a Node in the DAG - fn remove_all_edges(&mut self, node: NodeIndex) { - let incoming: Vec<_> = self - .dag - .edges_directed(node, petgraph::Direction::Incoming) - .map(|e| e.id()) - .collect(); - let outgoing: Vec<_> = self - .dag - .edges_directed(node, petgraph::Direction::Outgoing) - .map(|e| e.id()) - .collect(); - for edge in incoming.into_iter().chain(outgoing.into_iter()) { - self.dag.remove_edge(edge); - } - } - - /// helper that returns the direct ancestors of a given spend - /// along with any faults detected - /// On error returns the address of the missing ancestor - fn get_direct_ancestors( - &self, - spend: &SignedSpend, - ) -> Result<(BTreeSet, BTreeSet), SpendAddress> { - let addr = spend.address(); - let mut ancestors = BTreeSet::new(); - let mut faults = BTreeSet::new(); - for ancestor in spend.spend.ancestors.iter() { - let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor); - match self.spends.get(&ancestor_addr) { - Some(DagEntry::Spend(ancestor_spend, _)) 
=> { - ancestors.insert(*ancestor_spend.clone()); - } - Some(DagEntry::NotGatheredYet(_)) | None => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is missing"); - return Err(ancestor_addr); - } - Some(DagEntry::DoubleSpend(multiple_ancestors)) => { - debug!("Direct ancestor for spend {spend:?} at {ancestor_addr:?} is a double spend"); - faults.insert(SpendFault::DoubleSpentAncestor { - addr, - ancestor: ancestor_addr, - }); - let actual_ancestor: Vec<_> = multiple_ancestors - .iter() - .filter(|(s, _)| s.spend.descendants.contains_key(spend.unique_pubkey())) - .map(|(s, _)| s.clone()) - .collect(); - match actual_ancestor.as_slice() { - [ancestor_spend] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend but one of those match our parent_tx hash, using it for verification"); - ancestors.insert(ancestor_spend.clone()); - } - [ancestor1, _ancestor2, ..] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and mutliple match our parent_tx hash, using the first one for verification"); - ancestors.insert(ancestor1.clone()); - } - [] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and none of them match the spend parent_tx, which means the parent for this spend is missing!"); - return Err(ancestor_addr); - } - } - } - } - } - Ok((ancestors, faults)) - } - - /// helper that returns all the descendants (recursively all the way to UTXOs) of a given spend - fn all_descendants(&self, addr: &SpendAddress) -> Result, DagError> { - let mut descendants = BTreeSet::new(); - let mut to_traverse = BTreeSet::from_iter(vec![addr]); - while let Some(current_addr) = to_traverse.pop_first() { - // get the spend at this address - let dag_entry = match self.spends.get(current_addr) { - Some(entry) => entry, - None => { - warn!("Incoherent DAG, missing descendant spend when expecting one at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("Missing 
descendant spend in DAG at: {current_addr:?}"), - )); - } - }; - let (spends, indexes) = (dag_entry.spends(), dag_entry.indexes()); - - // get descendants via spend - let descendants_via_spend: BTreeSet = spends - .into_iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - - // get descendants via DAG - let descendants_via_dag: BTreeSet<&SpendAddress> = indexes - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), petgraph::Direction::Outgoing) - .map(|i| &self.dag[i]) - }) - .collect(); - - // report inconsistencies - if descendants_via_dag != descendants_via_spend.iter().collect() { - if matches!(dag_entry, DagEntry::NotGatheredYet(_)) { - debug!("Spend at {current_addr:?} was not gathered yet and has children refering to it, continuing traversal through those children..."); - } else { - warn!("Incoherent DAG at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("descendants via DAG: {descendants_via_dag:?} do not match descendants via spend: {descendants_via_spend:?}") - )); - } - } - - // continue traversal - let not_transversed = descendants_via_dag.difference(&descendants); - to_traverse.extend(not_transversed); - descendants.extend(descendants_via_dag.iter().cloned()); - } - Ok(descendants) - } - - /// find all the orphans in the DAG and record them as OrphanSpend - /// returns the list of OrphanSpend and other errors encountered in the way - fn find_orphans(&self, source: &SpendAddress) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let all_addresses: BTreeSet<&SpendAddress> = self.spends.keys().collect(); - let all_descendants = self.all_descendants(source)?; - let parents: BTreeSet<_> = self - .get_spend_indexes(source) - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), petgraph::Direction::Incoming) - }) - .map(|parent_idx| &self.dag[parent_idx]) - .collect(); - let 
non_orphans = - BTreeSet::from_iter(all_descendants.into_iter().chain(parents).chain([source])); - - // orphans are those that are neither descandants nor source's parents nor source itself - let orphans: BTreeSet<&SpendAddress> = - all_addresses.difference(&non_orphans).cloned().collect(); - for orphan in orphans { - let src = *source; - let addr = *orphan; - debug!("Found orphan: {orphan:?} of {src:?}"); - recorded_faults.insert(SpendFault::OrphanSpend { addr, src }); - } - - Ok(recorded_faults) - } - - /// Checks if a double spend has multiple living descendant branches that fork - fn double_spend_has_forking_descendant_branches(&self, spends: &Vec<&SignedSpend>) -> bool { - // gather all living descendants for each branch - let mut set_of_living_descendants: BTreeSet> = BTreeSet::new(); - for spend in spends { - let gathered_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .filter_map(|a| self.spends.get(&a)) - .filter_map(|s| { - if matches!(s, DagEntry::NotGatheredYet(_)) { - None - } else { - Some(s.spends()) - } - }) - .flatten() - .collect::>(); - set_of_living_descendants.insert(gathered_descendants); - } - - // make sure there is no fork - for set1 in set_of_living_descendants.iter() { - for set2 in set_of_living_descendants.iter() { - if set1.is_subset(set2) || set2.is_subset(set1) { - continue; - } else { - return true; - } - } - } - - false - } - - /// Verify the DAG and record faults in the DAG - /// If the DAG is invalid, return an error immediately, without mutating the DAG - pub fn record_faults(&mut self, source: &SpendAddress) -> Result<(), DagError> { - let faults = self.verify(source)?; - - self.faults.clear(); - for f in faults { - self.faults.entry(f.spend_address()).or_default().insert(f); - } - Ok(()) - } - - /// Verify the DAG and return faults detected in the DAG - /// If the DAG itself is invalid, return an error immediately - pub fn verify(&self, source: &SpendAddress) -> Result, 
DagError> { - info!("Verifying DAG starting off: {source:?}"); - let mut recorded_faults = BTreeSet::new(); - - // verify the DAG is acyclic - if petgraph::algo::is_cyclic_directed(&self.dag) { - warn!("DAG is cyclic"); - return Err(DagError::DagContainsCycle(*source)); - } - - // verify DAG source exists in the DAG (Genesis in case of a complete DAG) - debug!("Verifying DAG source: {source:?}"); - match self.spends.get(source) { - None => { - debug!("DAG does not contain its source: {source:?}"); - return Err(DagError::MissingSource(*source)); - } - Some(DagEntry::DoubleSpend(_)) => { - debug!("DAG source is a double spend: {source:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*source)); - } - _ => (), - } - - // identify orphans (spends that don't come from the source) - debug!("Looking for orphans of {source:?}"); - recorded_faults.extend(self.find_orphans(source)?); - - // check all transactions - for (addr, _) in self.spends.iter() { - debug!("Verifying transaction at: {addr:?}"); - // get the spend at this address - let spends = self - .spends - .get(addr) - .map(|s| s.spends()) - .unwrap_or_default(); - - // record double spends - if spends.len() > 1 { - debug!("Found a double spend entry in DAG at {addr:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*addr)); - let direct_descendants: BTreeSet = spends - .iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - debug!("Making the direct descendants of the double spend at {addr:?} as faulty: {direct_descendants:?}"); - for a in direct_descendants.iter() { - recorded_faults.insert(SpendFault::DoubleSpentAncestor { - addr: *a, - ancestor: *addr, - }); - } - if self.double_spend_has_forking_descendant_branches(&spends) { - debug!("Double spend at {addr:?} has multiple living descendant branches, poisoning them..."); - let poison = format!( - "spend is on one of multiple branches of a double spent ancestor: {addr:?}" - ); - let 
direct_living_descendant_spends: BTreeSet<_> = direct_descendants - .iter() - .filter_map(|a| self.spends.get(a)) - .flat_map(|s| s.spends()) - .collect(); - for s in direct_living_descendant_spends { - recorded_faults.extend(self.poison_all_descendants(s, poison.clone())?); - } - } - continue; - } - - // skip parent verification for source as we don't know its ancestors - if addr == source { - debug!("Skip parent verification for source at: {addr:?}"); - continue; - } - - // verify parents - for s in spends { - recorded_faults.extend(self.verify_spend_parents(s)?); - } - } - - info!( - "Found {} faults: {recorded_faults:#?}", - recorded_faults.len() - ); - Ok(recorded_faults) - } - - /// Verifies a single spend and returns resulting errors and DAG poisoning spread - fn verify_spend_parents(&self, spend: &SignedSpend) -> Result, DagError> { - let addr = spend.address(); - let mut recorded_faults = BTreeSet::new(); - debug!("Verifying spend: {spend:?}"); - - // skip if spend matches genesis - if is_genesis_spend(spend) { - debug!("Skip transaction verification for Genesis: {spend:?}"); - return Ok(recorded_faults); - } - - // get the ancestors of this spend - let (ancestor_spends, faults) = match self.get_direct_ancestors(spend) { - Ok(a) => a, - Err(missing_ancestor) => { - debug!("Failed to get ancestor spends of {spend:?} as ancestor at {missing_ancestor:?} is missing"); - recorded_faults.insert(SpendFault::MissingAncestry { - addr, - ancestor: missing_ancestor, - }); - - let poison = format!("missing ancestor at: {missing_ancestor:?}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - return Ok(recorded_faults); - } - }; - recorded_faults.extend(faults); - - // verify the parents - if let Err(e) = spend.verify_parent_spends(&ancestor_spends) { - warn!("Parent verfication failed for spend at: {spend:?}: {e}"); - recorded_faults.insert(SpendFault::InvalidTransaction(addr, format!("{e}"))); - 
let poison = format!("ancestor transaction was poisoned at: {spend:?}: {e}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - } - - Ok(recorded_faults) - } - - /// Poison all descendants of a spend with given the poison message - fn poison_all_descendants( - &self, - spend: &SignedSpend, - poison: String, - ) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let direct_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .collect::>(); - let mut all_descendants = direct_descendants - .iter() - .map(|addr| self.all_descendants(addr)) - .collect::, _>>()? - .into_iter() - .flatten() - .collect::>(); - all_descendants.extend(direct_descendants.iter()); - - for d in all_descendants { - recorded_faults.insert(SpendFault::PoisonedAncestry(*d, poison.clone())); - } - - Ok(recorded_faults) - } -} - -#[cfg(test)] -mod tests { - use xor_name::XorName; - - use super::*; - - #[test] - fn test_spend_dag_serialisation() { - let mut rng = rand::thread_rng(); - let dummy_source = SpendAddress::new(XorName::random(&mut rng)); - let dag = SpendDag::new(dummy_source); - let serialized_data = rmp_serde::to_vec(&dag).expect("Serialization failed"); - let deserialized_instance: SpendDag = - rmp_serde::from_slice(&serialized_data).expect("Deserialization failed"); - let reserialized_data = - rmp_serde::to_vec(&deserialized_instance).expect("Serialization failed"); - assert_eq!(reserialized_data, serialized_data); - } -} diff --git a/sn_client/src/audit/tests/mod.rs b/sn_client/src/audit/tests/mod.rs deleted file mode 100644 index d00e4b1055..0000000000 --- a/sn_client/src/audit/tests/mod.rs +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod setup; - -use std::collections::BTreeSet; - -use setup::MockNetwork; - -use eyre::Result; -use sn_transfers::SpendAddress; - -use crate::{SpendDag, SpendFault}; - -#[test] -fn test_spend_dag_verify_valid_simple() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - net.send(&owner3, &owner4, 100)?; - net.send(&owner4, &owner5, 100)?; - net.send(&owner5, &owner6, 100)?; - - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_verify_valid_simple")?; - - assert_eq!(dag.verify(&genesis), Ok(BTreeSet::new())); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_poisonning() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner_cheat = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, 
&owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend1 = net.send(&owner2, &owner3, 100)?; - let spend_ko3 = net.send(&owner3, &owner4, 100)?; - let spend_ok4 = net.send(&owner4, &owner5, 100)?; - let spend_ok5 = net.send(&owner5, &owner6, 100)?; - - // reuse that cashnote to perform a double spend far back in history - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2 = net.send(&owner2, &owner_cheat, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_poisonning")?; - - // make sure double spend is detected - assert_eq!(spend1, spend2, "both spends should be at the same address"); - let double_spent = spend1.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are unspendable - let upk = net - .wallets - .get(&owner_cheat) - .expect("owner_cheat wallet to exist") - .cn - .first() - .expect("owner_cheat wallet to have 1 cashnote") - .unique_pubkey(); - let utxo = SpendAddress::from_unique_pubkey(&upk); - let got = dag.get_spend_faults(&utxo); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "UTXO of double spend should be unspendable"); - let s3 = spend_ko3.first().expect("spend_ko3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, 
"spend_ko3 should be unspendable"); - - // make sure this didn't poison the rest of the DAG - let s4 = spend_ok4.first().expect("spend_ok4 to be unique"); - let s5 = spend_ok5.first().expect("spend_ok5 to be unique"); - let unaffected = BTreeSet::new(); - - assert_eq!(dag.get_spend_faults(s4), unaffected); - assert_eq!(dag.get_spend_faults(s5), unaffected); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_branches() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner3a = net.new_pk_with_balance(0)?; - let owner4a = net.new_pk_with_balance(0)?; - let owner5a = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, &owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend2 = net.send(&owner2, &owner3, 100)?; - let spend3 = net.send(&owner3, &owner4, 100)?; - let spend4 = net.send(&owner4, &owner5, 100)?; - let spend5 = net.send(&owner5, &owner6, 100)?; - - // reuse that cashnote to perform a double spend and create a branch - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2a = net.send(&owner2, &owner3a, 100)?; - let spend3a = net.send(&owner3a, &owner4a, 100)?; - let spend4a = net.send(&owner4a, &owner5a, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - println!("Adding into dag with spend {spend:?}"); - dag.insert(spend.address(), spend.clone()); - } - - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_branches")?; - - // make sure double spend is 
detected - assert_eq!(spend2, spend2a, "both spends should be at the same address"); - let double_spent = spend2.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are marked as double spent - let s3 = spend3.first().expect("spend3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3 should be unspendable"); - let s3a = spend3a.first().expect("spend3a to have an element"); - let got = dag.get_spend_faults(s3a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3a, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3a should be unspendable"); - - // make sure all the descendants further down the branch are poisoned due to a double spent ancestor - let utxo_of_5a = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner5a) - .expect("owner5a wallet to exist") - .cn - .first() - .expect("owner5a wallet to have 1 cashnote") - .unique_pubkey(), - ); - let utxo_of_6 = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(), - ); - let all_descendants = [spend4, spend5, vec![utxo_of_6], spend4a, vec![utxo_of_5a]]; - for d in all_descendants.iter() { - let got = dag.get_spend_faults(d.first().expect("descendant spend to have an element")); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - *d.first().expect("d to have an element"), - format!( - "spend is on one of multiple branches of a double spent ancestor: {double_spent:?}" - ), - )]); - assert_eq!(got, expected, "all 
descendants should be marked as bad"); - } - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_detection() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2a = net.new_pk_with_balance(0)?; - let owner2b = net.new_pk_with_balance(0)?; - - // perform double spend - let cn_to_reuse = net - .wallets - .get(&owner1) - .expect("owner1 wallet to exist") - .cn - .clone(); - let spend1_addr = net.send(&owner1, &owner2a, 100)?; - net.wallets - .get_mut(&owner1) - .expect("owner1 wallet to still exist") - .cn = cn_to_reuse; - let spend2_addr = net.send(&owner1, &owner2b, 100)?; - - // get the UTXOs of the two spends - let upk_of_2a = net - .wallets - .get(&owner2a) - .expect("owner2a wallet to exist") - .cn - .first() - .expect("owner2a wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2a = SpendAddress::from_unique_pubkey(&upk_of_2a); - let upk_of_2b = net - .wallets - .get(&owner2b) - .expect("owner2b wallet to exist") - .cn - .first() - .expect("owner2b wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2b = SpendAddress::from_unique_pubkey(&upk_of_2b); - - // make DAG - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_detection")?; - - // make sure the double spend is detected - assert_eq!( - spend1_addr, spend2_addr, - "both spends should be at the same address" - ); - assert_eq!(spend1_addr.len(), 1, "there should only be one spend"); - let double_spent = spend1_addr.first().expect("spend1_addr to have an element"); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!( - dag.get_spend_faults(double_spent), - expected, - "DAG should have detected double spend" - ); - - // make sure the UTXOs of the double spend are unspendable - let got = 
dag.get_spend_faults(&utxo_of_2a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2a, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO a of double spend should be unspendable" - ); - - let got = dag.get_spend_faults(&utxo_of_2b); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2b, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO b of double spend should be unspendable" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_missing_ancestry() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let spent_after2 = net - .send(&owner5, &owner6, 100)? 
- .first() - .expect("spent_after2 should have 1 element") - .to_owned(); - let utxo_after3 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after3); - - // create dag with one missing spend - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_missing_ancestry")?; - - // make sure the missing spend makes its descendants invalid - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing, - }]); - assert_eq!(got, expected, "DAG should have detected missing ancestry"); - - let got = dag.get_spend_faults(&spent_after2); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - spent_after2, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error to descendants" - ); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - utxo_addr, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error all the way to descendant utxos" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_orphans() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 
100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing1 = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spend_missing2 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spend_missing2 should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner5, &owner6, 100)? - .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let utxo_after2 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after2); - - // create dag with two missing spends in the chain - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing1 != s.address() && spend_missing2 != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_orphans")?; - - // make sure the spends after the two missing spends are orphans - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([ - SpendFault::OrphanSpend { - addr: spent_after1, - src: dag.source(), - }, - SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing2, - }, - ]); - assert_eq!(got, expected, "DAG should have detected orphan spend"); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = SpendFault::OrphanSpend { - addr: utxo_addr, - src: dag.source(), - }; - assert!( - got.contains(&expected), - "Utxo of orphan spend should also be an orphan" - ); - Ok(()) -} diff --git a/sn_client/src/audit/tests/setup.rs b/sn_client/src/audit/tests/setup.rs deleted file mode 100644 index 4fa777ff22..0000000000 --- a/sn_client/src/audit/tests/setup.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use std::collections::{BTreeMap, BTreeSet}; - -use bls::SecretKey; -use eyre::{eyre, Result}; -use sn_transfers::{ - get_genesis_sk, CashNote, DerivationIndex, MainPubkey, MainSecretKey, NanoTokens, SignedSpend, - SignedTransaction, SpendAddress, SpendReason, GENESIS_CASHNOTE, -}; - -pub struct MockWallet { - pub sk: MainSecretKey, - pub cn: Vec, -} - -pub struct MockNetwork { - pub genesis_spend: SpendAddress, - pub spends: BTreeSet, - pub wallets: BTreeMap, -} - -impl MockNetwork { - pub fn genesis() -> Result { - let mut net = MockNetwork { - genesis_spend: SpendAddress::from_unique_pubkey(&GENESIS_CASHNOTE.unique_pubkey()), - spends: BTreeSet::new(), - wallets: BTreeMap::new(), - }; - - // create genesis wallet - let genesis_cn = GENESIS_CASHNOTE.clone(); - let genesis_pk = *GENESIS_CASHNOTE.main_pubkey(); - net.wallets.insert( - genesis_pk, - MockWallet { - sk: get_genesis_sk(), - cn: vec![genesis_cn], - }, - ); - - // spend genesis - let everything = GENESIS_CASHNOTE.value().as_nano(); - let spent_addrs = net - .send(&genesis_pk, &genesis_pk, everything) - .map_err(|e| eyre!("failed to send genesis: {e}"))?; - net.genesis_spend = match spent_addrs.as_slice() { - [one] => *one, - _ => { - return Err(eyre!( - "Expected Genesis spend to be unique but got {spent_addrs:?}" - )) - } - }; - - Ok(net) - } - - pub fn new_pk_with_balance(&mut self, balance: u64) -> Result { - let owner = MainSecretKey::new(SecretKey::random()); - let owner_pk = owner.main_pubkey(); - self.wallets.insert( - 
owner_pk, - MockWallet { - sk: owner, - cn: Vec::new(), - }, - ); - - if balance > 0 { - let genesis_pk = GENESIS_CASHNOTE.main_pubkey(); - println!("Sending {balance} from genesis {genesis_pk:?} to {owner_pk:?}"); - self.send(genesis_pk, &owner_pk, balance) - .map_err(|e| eyre!("failed to get money from genesis: {e}"))?; - } - Ok(owner_pk) - } - - pub fn send( - &mut self, - from: &MainPubkey, - to: &MainPubkey, - amount: u64, - ) -> Result> { - let mut rng = rand::thread_rng(); - let from_wallet = self - .wallets - .get(from) - .ok_or_else(|| eyre!("from wallet not found: {from:?}"))?; - let to_wallet = self - .wallets - .get(to) - .ok_or_else(|| eyre!("to wallet not found: {to:?}"))?; - - // perform offline transfer - let derivation_index = DerivationIndex::random(&mut rng); - let recipient = vec![( - NanoTokens::from(amount), - to_wallet.sk.main_pubkey(), - derivation_index, - false, - )]; - let tx = SignedTransaction::new( - from_wallet.cn.clone(), - recipient, - from_wallet.sk.main_pubkey(), - SpendReason::default(), - &from_wallet.sk, - ) - .map_err(|e| eyre!("failed to create transfer: {}", e))?; - let spends = tx.spends; - - // update wallets - let mut updated_from_wallet_cns = from_wallet.cn.clone(); - updated_from_wallet_cns.retain(|cn| { - !spends - .iter() - .any(|s| s.unique_pubkey() == &cn.unique_pubkey()) - }); - if let Some(ref change_cn) = tx.change_cashnote { - if !updated_from_wallet_cns - .iter() - .any(|cn| cn.unique_pubkey() == change_cn.unique_pubkey()) - { - updated_from_wallet_cns.extend(tx.change_cashnote); - } - } - - self.wallets - .entry(*from) - .and_modify(|w| w.cn = updated_from_wallet_cns); - self.wallets - .entry(*to) - .and_modify(|w| w.cn.extend(tx.output_cashnotes)); - - // update network spends - let spent_addrs = spends.iter().map(|s| s.address()).collect(); - self.spends.extend(spends); - Ok(spent_addrs) - } -} diff --git a/sn_client/src/chunks/error.rs b/sn_client/src/chunks/error.rs deleted file mode 100644 index 
6f9c83474e..0000000000 --- a/sn_client/src/chunks/error.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use self_encryption::MIN_ENCRYPTABLE_BYTES; -use sn_protocol::PrettyPrintRecordKey; -use std::io; -use thiserror::Error; -use xor_name::XorName; - -pub(crate) type Result = std::result::Result; - -/// Internal error. -#[derive(Debug, Error)] -pub enum Error { - #[error("Failed to get find payment for record: {0:?}")] - NoPaymentForRecord(PrettyPrintRecordKey<'static>), - - #[error("Failed to get chunk permit")] - CouldNotGetChunkPermit, - - #[error(transparent)] - SelfEncryption(#[from] self_encryption::Error), - - #[error(transparent)] - Io(#[from] io::Error), - - #[error(transparent)] - Serialisation(#[from] rmp_serde::encode::Error), - - #[error(transparent)] - Deserialisation(#[from] rmp_serde::decode::Error), - - #[error("Cannot store empty file.")] - EmptyFileProvided, - - #[error("File is too small to be encrypted, it is less than {MIN_ENCRYPTABLE_BYTES} bytes")] - FileTooSmall, - - #[error( - "The provided bytes ({size}) is too large to store as a `SmallFile` which maximum can be \ - {maximum}. Store as a LargeFile instead." 
- )] - TooLargeAsSmallFile { - /// Number of bytes - size: usize, - /// Maximum number of bytes for a `SmallFile` - maximum: usize, - }, - - #[error("Not all chunks were retrieved, expected {expected}, retrieved {retrieved}, missing {missing_chunks:?}.")] - NotEnoughChunksRetrieved { - /// Number of Chunks expected to be retrieved - expected: usize, - /// Number of Chunks retrieved - retrieved: usize, - /// Missing chunks - missing_chunks: Vec, - }, - - #[error("Chunk could not be retrieved from the network: {0:?}")] - ChunkMissing(XorName), - - #[error("Not all data was chunked, expected {expected}, but we have {chunked}.)")] - NotAllDataWasChunked { - /// Number of Chunks expected to be generated - expected: usize, - /// Number of Chunks generated - chunked: usize, - }, -} diff --git a/sn_client/src/chunks/pac_man.rs b/sn_client/src/chunks/pac_man.rs deleted file mode 100644 index 3cd368e320..0000000000 --- a/sn_client/src/chunks/pac_man.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::Result; -use bytes::{BufMut, Bytes, BytesMut}; -use rayon::prelude::*; -use self_encryption::{DataMap, StreamSelfEncryptor, MAX_CHUNK_SIZE}; -use serde::{Deserialize, Serialize}; -use sn_protocol::storage::Chunk; -use std::{ - fs::File, - io::Write, - path::{Path, PathBuf}, -}; -use xor_name::XorName; - -#[derive(Serialize, Deserialize)] -pub(crate) enum DataMapLevel { - // Holds the data map to the source data. 
- First(DataMap), - // Holds the data map of an _additional_ level of chunks - // resulting from chunking up a previous level data map. - // This happens when that previous level data map was too big to fit in a chunk itself. - Additional(DataMap), -} - -#[expect(unused)] -pub(crate) fn encrypt_from_path(path: &Path, output_dir: &Path) -> Result<(Chunk, Vec)> { - let (data_map, mut encrypted_chunks) = self_encryption::encrypt_from_file(path, output_dir)?; - - let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - - for chunk in additional_chunks.iter() { - encrypted_chunks.push(*chunk.name()); - let file_path = output_dir.join(hex::encode(chunk.name())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn encrypt_large( - file_path: &Path, - output_dir: &Path, -) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { - let mut encryptor = StreamSelfEncryptor::encrypt_from_file( - file_path.to_path_buf(), - Some(output_dir.to_path_buf()), - )?; - - let data_map; - loop { - match encryptor.next_encryption()? { - (None, Some(m)) => { - // Returning a data_map means file encryption is completed. - data_map = m; - break; - } - _ => continue, - } - } - let mut encrypted_chunks: Vec<_> = data_map - .infos() - .iter() - .map(|chunk_info| { - let chunk_file_path = output_dir.join(hex::encode(chunk_info.dst_hash)); - (chunk_info.dst_hash, chunk_file_path) - }) - .collect(); - - // Pack the datamap into chunks that under the same output folder as well. 
- let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - for chunk in additional_chunks.iter() { - let file_path = output_dir.join(hex::encode(chunk.name())); - encrypted_chunks.push((*chunk.name(), file_path.to_path_buf())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn to_chunk(chunk_content: Bytes) -> Chunk { - Chunk::new(chunk_content) -} - -// Produces a chunk out of the first `DataMap`, which is validated for its size. -// If the chunk is too big, it is self-encrypted and the resulting (additional level) `DataMap` is put into a chunk. -// The above step is repeated as many times as required until the chunk size is valid. -// In other words: If the chunk content is too big, it will be -// self encrypted into additional chunks, and now we have a new `DataMap` -// which points to all of those additional chunks.. and so on. -fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec)> { - let mut chunks = vec![]; - let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?; - debug!("Max chunk size: {} bytes", *MAX_CHUNK_SIZE); - - let (data_map_chunk, additional_chunks) = loop { - let chunk = to_chunk(chunk_content); - // If datamap chunk is less than or equal to MAX_CHUNK_SIZE return it so it can be directly sent to the network. - if chunk.serialised_size() <= *MAX_CHUNK_SIZE { - chunks.reverse(); - // Returns the last datamap, and all the chunks produced. 
- break (chunk, chunks); - } else { - let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE).writer(); - let mut serialiser = rmp_serde::Serializer::new(&mut bytes); - chunk.serialize(&mut serialiser)?; - let serialized_chunk = bytes.into_inner().freeze(); - - let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)?; - chunks = next_encrypted_chunks - .par_iter() - .map(|c| to_chunk(c.content.clone())) // no need to encrypt what is self-encrypted - .chain(chunks) - .collect(); - chunk_content = wrap_data_map(&DataMapLevel::Additional(data_map))?; - } - }; - - Ok((data_map_chunk, additional_chunks)) -} - -fn wrap_data_map(data_map: &DataMapLevel) -> Result { - // we use an initial/starting size of 300 bytes as that's roughly the current size of a DataMapLevel instance. - let mut bytes = BytesMut::with_capacity(300).writer(); - let mut serialiser = rmp_serde::Serializer::new(&mut bytes); - data_map.serialize(&mut serialiser)?; - Ok(bytes.into_inner().freeze()) -} diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs deleted file mode 100644 index d5af8bb22f..0000000000 --- a/sn_client/src/error.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -pub(crate) type Result = std::result::Result; - -use crate::UploadSummary; - -use super::ClientEvent; -use sn_protocol::NetworkAddress; -use sn_registers::{Entry, EntryHash}; -use std::collections::BTreeSet; -use thiserror::Error; -use tokio::time::Duration; -use xor_name::XorName; - -/// Internal error. -#[derive(Debug, Error)] -pub enum Error { - #[error("Genesis disbursement failed")] - GenesisDisbursement, - - #[error("Genesis error {0}")] - GenesisError(#[from] sn_transfers::GenesisError), - - #[error("Wallet Error {0}.")] - Wallet(#[from] sn_transfers::WalletError), - - #[error("Transfer Error {0}.")] - Transfer(#[from] sn_transfers::TransferError), - - #[error("Network Error {0}.")] - Network(#[from] sn_networking::NetworkError), - - #[error("Protocol error {0}.")] - Protocol(#[from] sn_protocol::error::Error), - - #[error("Register error {0}.")] - Register(#[from] sn_registers::Error), - - #[error("Chunks error {0}.")] - Chunks(#[from] super::chunks::Error), - - #[error("Decrypting a Folder's item failed: {0}")] - FolderEntryDecryption(EntryHash), - - #[error("SelfEncryption Error {0}.")] - SelfEncryptionIO(#[from] self_encryption::Error), - - #[error("System IO Error {0}.")] - SystemIO(#[from] std::io::Error), - - #[error("Events receiver error {0}.")] - EventsReceiver(#[from] tokio::sync::broadcast::error::RecvError), - - #[error("Events sender error {0}.")] - EventsSender(#[from] tokio::sync::broadcast::error::SendError), - - #[error(transparent)] - JoinError(#[from] tokio::task::JoinError), - - /// A general error when verifying a transfer validity in the network. 
- #[error("Failed to verify transfer validity in the network {0}")] - CouldNotVerifyTransfer(String), - #[error("Invalid DAG")] - InvalidDag, - #[error("Serialization error: {0:?}")] - Serialization(#[from] rmp_serde::encode::Error), - #[error("Deserialization error: {0:?}")] - Deserialization(#[from] rmp_serde::decode::Error), - - #[error( - "Content branches detected in the Register which need to be merged/resolved by user. \ - Entries hashes of branches are: {0:?}" - )] - ContentBranchDetected(BTreeSet<(EntryHash, Entry)>), - - #[error("The provided amount contains zero nanos")] - AmountIsZero, - - #[error("The payee for the address {0:?} was not found.")] - PayeeNotFound(NetworkAddress), - - /// CashNote add would overflow - #[error("Total price exceed possible token amount")] - TotalPriceTooHigh, - - #[error("Could not connect to the network in {0:?}")] - ConnectionTimeout(Duration), - - #[error("Could not send files event")] - CouldNotSendFilesEvent, - - #[error("Incorrect Download Option")] - IncorrectDownloadOption, - - #[error("The provided data map is empty")] - EmptyDataMap, - - #[error("Error occurred while assembling the downloaded chunks")] - FailedToAssembleDownloadedChunks, - - #[error("Task completion notification channel is done")] - FailedToReadFromNotificationChannel, - - #[error("Could not find register after batch sync: {0:?}")] - RegisterNotFoundAfterUpload(XorName), - - #[error("Could not connect due to incompatible network protocols. 
Our protocol: {0} Network protocol: {1}")] - UnsupportedProtocol(String, String), - - // ------ Upload Errors -------- - #[error("Overflow occurred while adding values")] - NumericOverflow, - - #[error("Uploadable item not found: {0:?}")] - UploadableItemNotFound(XorName), - - #[error("Invalid upload item found")] - InvalidUploadItemFound, - - #[error("The state tracked by the uploader is empty")] - UploadStateTrackerIsEmpty, - - #[error("Internal task channel dropped")] - InternalTaskChannelDropped, - - #[error("Multiple consecutive network errors reported during upload")] - SequentialNetworkErrors, - - #[error("Too many sequential payment errors reported during upload")] - SequentialUploadPaymentError, - - #[error("The maximum specified repayments has been reached for a single item: {0:?}")] - MaximumRepaymentsReached(XorName), - - #[error("The upload failed with maximum repayments reached for multiple items: {items:?} Summary: {summary:?}")] - UploadFailedWithMaximumRepaymentsReached { - items: Vec, - summary: UploadSummary, - }, - - #[error("Error occurred when access wallet file")] - FailedToAccessWallet, - - #[error("Error parsing entropy for mnemonic phrase")] - FailedToParseEntropy, - - #[error("Error parsing mnemonic phrase")] - FailedToParseMnemonic, - - #[error("Invalid mnemonic seed phrase")] - InvalidMnemonicSeedPhrase, - - #[error("SecretKey could not be created from the provided bytes")] - InvalidKeyBytes, -} diff --git a/sn_client/src/event.rs b/sn_client/src/event.rs deleted file mode 100644 index 14ba654d0f..0000000000 --- a/sn_client/src/event.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use serde::Serialize; -use tokio::sync::broadcast::{self, error::RecvError}; - -// Channel where events will be broadcasted by the client. -#[derive(Clone, Debug)] -pub struct ClientEventsBroadcaster(broadcast::Sender); - -impl Default for ClientEventsBroadcaster { - fn default() -> Self { - Self(broadcast::channel(100).0) - } -} - -impl ClientEventsBroadcaster { - /// Returns a new receiver to listen to the channel. - /// Multiple receivers can be actively listening. - pub fn subscribe(&self) -> ClientEventsReceiver { - ClientEventsReceiver(self.0.subscribe()) - } - - // Broadcast a new event, meant to be a helper only used by the client's internals. - pub(crate) fn broadcast(&self, event: ClientEvent) { - if let Err(err) = self.0.send(event) { - if self.0.receiver_count() == 0 { - return; - } - trace!("Could not broadcast ClientEvent, though we do have listeners: {err:?}"); - } - } -} - -/// Type of events broadcasted by the client to the public API. -#[derive(Clone, custom_debug::Debug, Serialize)] -pub enum ClientEvent { - /// A peer has been added to the Routing table. - /// Also contains the max number of peers to connect to before we receive ClientEvent::ConnectedToNetwork - PeerAdded { max_peers_to_connect: usize }, - /// We've encountered a Peer with an unsupported protocol. 
- PeerWithUnsupportedProtocol { - our_protocol: String, - their_protocol: String, - }, - /// The client has been connected to the network - ConnectedToNetwork, - /// No network activity has been received for a given duration - /// we should error out - InactiveClient(tokio::time::Duration), -} - -/// Receiver Channel where users of the public API can listen to events broadcasted by the client. -#[derive(Debug)] -pub struct ClientEventsReceiver(pub(super) broadcast::Receiver); - -impl ClientEventsReceiver { - /// Receive a new event, meant to be used by the user of the public API. - pub async fn recv(&mut self) -> std::result::Result { - self.0.recv().await - } -} diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs deleted file mode 100644 index a294256945..0000000000 --- a/sn_client/src/faucet.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::send, Client, Error, Result}; -use sn_transfers::{load_genesis_wallet, HotWallet, NanoTokens, Transfer, FOUNDATION_PK}; - -const INITIAL_FAUCET_BALANCE: NanoTokens = NanoTokens::from(900000000000000000); - -/// Use the client to load the faucet wallet from the genesis Wallet. -/// With all balance transferred from the genesis_wallet to the faucet_wallet. 
-pub async fn fund_faucet_from_genesis_wallet( - client: &Client, - faucet_wallet: &mut HotWallet, -) -> Result<()> { - faucet_wallet.try_load_cash_notes()?; - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - println!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - debug!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - - return Ok(()); - } - - info!("funding faucet from genesis..."); - - // Confirm Genesis not used yet - if client.is_genesis_spend_present().await { - warn!("Faucet can't get funded from genesis, genesis is already spent!"); - println!("Faucet can't get funded from genesis, genesis is already spent!"); - panic!("Faucet can't get funded from genesis, genesis is already spent!"); - } - - println!("Initiating genesis..."); - debug!("Initiating genesis..."); - let genesis_wallet = load_genesis_wallet()?; - let genesis_balance = genesis_wallet.balance(); - - let (foundation_cashnote, faucet_cashnote) = { - println!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - debug!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet wallet balance: {}", faucet_wallet.balance()); - let faucet_cashnote = send( - genesis_wallet, - INITIAL_FAUCET_BALANCE, - faucet_wallet.address(), - client, - true, - ) - .await?; - - faucet_wallet - .deposit_and_store_to_disk(&vec![faucet_cashnote.clone()]) - .expect("Faucet wallet shall be stored successfully."); - - // now send the money to the foundation - let foundation_balance = genesis_balance - .checked_sub(INITIAL_FAUCET_BALANCE) - .ok_or(Error::GenesisDisbursement)?; - - println!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - debug!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - - let genesis_wallet = load_genesis_wallet()?; - - let foundation_cashnote = send( - 
genesis_wallet, - foundation_balance, - *FOUNDATION_PK, - client, - true, - ) - .await?; - - (foundation_cashnote, faucet_cashnote) - }; - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet wallet balance: {}", faucet_wallet.balance()); - - println!("Verifying the transfer from genesis..."); - debug!("Verifying the transfer from genesis..."); - if let Err(error) = client.verify_cashnote(&foundation_cashnote).await { - error!("Could not verify the transfer from genesis to foundation: {error}. Panicking."); - panic!("Could not verify the transfer from genesis to foundation: {error}"); - } else { - println!( - "Successfully verified the transfer from genesis to foundation on the second try." - ); - - #[cfg(not(target_arch = "wasm32"))] - { - // write the foundation cashnote to disk - let root_dir = faucet_wallet.api().wallet_dir(); - - let foundation_transfer_path = root_dir.join("foundation_disbursement.transfer"); - - debug!("Writing cash note to: {foundation_transfer_path:?}"); - - let transfer = Transfer::transfer_from_cash_note(&foundation_cashnote)?.to_hex()?; - - if let Err(error) = std::fs::write(foundation_transfer_path, transfer) { - error!("Could not write the foundation transfer to disk: {error}."); - return Err(Error::from(error)); - } - } - - info!("Successfully verified the transfer from genesis to foundation on the second try."); - } - - if let Err(error) = client.verify_cashnote(&faucet_cashnote).await { - error!("Could not verify the transfer from genesis to faucet: {error}. 
Panicking."); - panic!("Could not verify the transfer from genesis to faucet: {error}"); - } else { - println!("Successfully verified the transfer from genesis to faucet on the second try."); - info!("Successfully verified the transfer from genesis to faucet on the second try."); - } - - Ok(()) -} diff --git a/sn_client/src/files.rs b/sn_client/src/files.rs deleted file mode 100644 index 8643b71961..0000000000 --- a/sn_client/src/files.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -pub(crate) mod download; - -use crate::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, chunks::Error as ChunksError, - error::Result, wallet::StoragePaymentResult, Client, Error, WalletClient, -}; -use bytes::Bytes; -use self_encryption::{self, MIN_ENCRYPTABLE_BYTES}; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RetryStrategy}, - NetworkAddress, -}; - -use std::{ - fs::{self, create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tempfile::tempdir; -use tracing::trace; -use xor_name::XorName; - -/// `BATCH_SIZE` determines the number of chunks that are processed in parallel during the payment and upload process. -pub const BATCH_SIZE: usize = 16; - -/// File APIs. 
-#[derive(Clone)] -pub struct FilesApi { - pub(crate) client: Client, - pub(crate) wallet_dir: PathBuf, -} - -/// This is the (file xorname, datamap_data, filesize, and chunks) -/// If the DataMapChunk exists and is not stored on the network, then it will not be accessible at this address of ChunkAddress(XorName) . -type ChunkFileResult = Result<(ChunkAddress, Chunk, u64, Vec<(XorName, PathBuf)>)>; - -impl FilesApi { - /// Create file apis instance. - pub fn new(client: Client, wallet_dir: PathBuf) -> Self { - Self { client, wallet_dir } - } - pub fn build(client: Client, wallet_dir: PathBuf) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(&wallet_dir, None)?; - - if wallet.balance().is_zero() { - Err(Error::AmountIsZero) - } else { - Ok(FilesApi::new(client, wallet_dir)) - } - } - - /// Return the client instance - pub fn client(&self) -> &Client { - &self.client - } - - /// Create a new WalletClient for a given root directory. - pub fn wallet(&self) -> Result { - let path = self.wallet_dir.as_path(); - - let wallet = load_account_wallet_or_create_with_mnemonic(path, None)?; - - Ok(WalletClient::new(self.client.clone(), wallet)) - } - - /// Tries to chunk the file, returning `(head_address, data_map_chunk, file_size, chunk_names)` - /// and writes encrypted chunks to disk. - pub fn chunk_file( - file_path: &Path, - chunk_dir: &Path, - include_data_map_in_chunks: bool, - ) -> ChunkFileResult { - let file = File::open(file_path)?; - let metadata = file.metadata()?; - let file_size = metadata.len(); - - let (head_address, data_map_chunk, mut chunks_paths) = - if file_size < MIN_ENCRYPTABLE_BYTES as u64 { - Err(ChunksError::FileTooSmall)? 
- } else { - let (data_map_chunk, chunks) = encrypt_large(file_path, chunk_dir)?; - (*data_map_chunk.name(), data_map_chunk, chunks) - }; - - debug!("include_data_map_in_chunks {include_data_map_in_chunks:?}"); - - if include_data_map_in_chunks { - info!("Data_map_chunk to be written!"); - let data_map_path = chunk_dir.join(hex::encode(*data_map_chunk.name())); - - trace!("Data_map_chunk being written to {data_map_path:?}"); - let mut output_file = File::create(data_map_path.clone())?; - output_file.write_all(&data_map_chunk.value)?; - - chunks_paths.push((*data_map_chunk.name(), data_map_path)) - } - - Ok(( - ChunkAddress::new(head_address), - data_map_chunk, - file_size, - chunks_paths, - )) - } - - /// Directly writes Chunks to the network in the - /// form of immutable self encrypted chunks. - /// - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - pub async fn get_local_payment_and_upload_chunk( - &self, - chunk: Chunk, - verify_store: bool, - retry_strategy: Option, - ) -> Result<()> { - let chunk_addr = chunk.network_address(); - trace!("Client upload started for chunk: {chunk_addr:?}"); - - let wallet_client = self.wallet()?; - let (payment, payee) = wallet_client.get_recent_payment_for_addr(&chunk_addr)?; - - debug!("Payments for chunk: {chunk_addr:?} to {payee:?}: {payment:?}"); - - self.client - .store_chunk(chunk, payee, payment, verify_store, retry_strategy) - .await?; - - wallet_client.remove_payment_for_addr(&chunk_addr)?; - - trace!("Client upload completed for chunk: {chunk_addr:?}"); - Ok(()) - } - - /// Pay for a given set of chunks. - /// - /// Returns the cost and the resulting new balance of the local wallet. 
- pub async fn pay_for_chunks(&self, chunks: Vec) -> Result { - let mut wallet_client = self.wallet()?; - info!("Paying for and uploading {:?} chunks", chunks.len()); - - let res = wallet_client - .pay_for_storage( - chunks - .iter() - .map(|name| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), - ) - .await?; - - wallet_client.store_local_wallet()?; - Ok(res) - } - - // -------------------------------------------- - // ---------- Private helpers ----------------- - // -------------------------------------------- - - /// Used for testing - pub async fn upload_test_bytes(&self, bytes: Bytes, verify: bool) -> Result { - let temp_dir = tempdir()?; - let file_path = temp_dir.path().join("tempfile"); - let mut file = File::create(&file_path)?; - file.write_all(&bytes)?; - - let chunk_path = temp_dir.path().join("chunk_path"); - create_dir_all(chunk_path.clone())?; - - let (head_address, _data_map, _file_size, chunks_paths) = - Self::chunk_file(&file_path, &chunk_path, true)?; - - for (_chunk_name, chunk_path) in chunks_paths { - let chunk = Chunk::new(Bytes::from(fs::read(chunk_path)?)); - self.get_local_payment_and_upload_chunk(chunk, verify, None) - .await?; - } - - Ok(NetworkAddress::ChunkAddress(head_address)) - } -} - -/// Encrypts a [`LargeFile`] and returns the resulting address and all chunk names. -/// Correspondent encrypted chunks are written in the specified output folder. -/// Does not store anything to the network. -/// -/// Returns data map as a chunk, and the resulting chunks -fn encrypt_large(file_path: &Path, output_dir: &Path) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { - Ok(crate::chunks::encrypt_large(file_path, output_dir)?) -} diff --git a/sn_client/src/files/download.rs b/sn_client/src/files/download.rs deleted file mode 100644 index 4444fab023..0000000000 --- a/sn_client/src/files/download.rs +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - chunks::{DataMapLevel, Error as ChunksError}, - error::{Error as ClientError, Result}, - Client, FilesApi, BATCH_SIZE, -}; -use bytes::Bytes; -use futures::StreamExt; -use itertools::Itertools; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk, StreamSelfDecryptor}; -use sn_networking::target_arch::Instant; -use sn_protocol::storage::{Chunk, ChunkAddress, RetryStrategy}; - -use std::{collections::HashMap, fs, path::PathBuf}; -use tokio::sync::mpsc::{self}; -use xor_name::XorName; - -/// The events emitted from the download process. -pub enum FilesDownloadEvent { - /// Downloaded a Chunk from the network - Downloaded(ChunkAddress), - /// The total number of chunks we are about to download. - /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk. - ChunksCount(usize), - /// The total number of data map chunks that we are about to download. This happens if the datamap file is. - /// very large. - /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk. - DatamapCount(usize), - /// The download process has terminated with an error. - Error, -} - -// Internally used to differentiate between the various ways that the downloaded chunks are returned. 
-enum DownloadReturnType { - EncryptedChunks(Vec), - DecryptedBytes(Bytes), - WrittenToFileSystem, -} - -/// `FilesDownload` provides functionality for downloading chunks with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). -pub struct FilesDownload { - // Configurations - batch_size: usize, - show_holders: bool, - retry_strategy: RetryStrategy, - // API - api: FilesApi, - // Events - event_sender: Option>, - logged_event_sender_absence: bool, -} - -impl FilesDownload { - /// Creates a new instance of `FilesDownload` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - pub fn new(files_api: FilesApi) -> Self { - Self { - batch_size: BATCH_SIZE, - show_holders: false, - retry_strategy: RetryStrategy::Quick, - api: files_api, - event_sender: None, - logged_event_sender_absence: false, - } - } - - /// Sets the default batch size that determines the number of chunks that are downloaded in parallel - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 64`. - pub fn set_batch_size(mut self, batch_size: usize) -> Self { - self.batch_size = batch_size; - self - } - - /// Sets the option to display the holders that are expected to be holding a chunk during verification. - /// - /// By default, this option is set to false. - pub fn set_show_holders(mut self, show_holders: bool) -> Self { - self.show_holders = show_holders; - self - } - - /// Sets the RetryStrategy to increase the re-try on failure attempts. - /// - /// By default, this option is set to RetryStrategy::Quick - pub fn set_retry_strategy(mut self, retry_strategy: RetryStrategy) -> Self { - self.retry_strategy = retry_strategy; - self - } - - /// Returns a receiver for file download events. 
- /// This method is optional and the download process can be performed without it. - pub fn get_events(&mut self) -> mpsc::Receiver { - let (event_sender, event_receiver) = mpsc::channel(10); - // should we return error if an sender is already set? - self.event_sender = Some(event_sender); - - event_receiver - } - - /// Download bytes from the network. The contents are spread across - /// multiple chunks in the network. This function invokes the self-encryptor and returns - /// the data that was initially stored. - /// - /// Takes `position` and `length` arguments which specify the start position - /// and the length of bytes to be read. - /// Passing `0` to position reads the data from the beginning, - /// and the `length` is just an upper limit. - pub async fn download_from( - &mut self, - address: ChunkAddress, - position: usize, - length: usize, - ) -> Result { - // clean up the trackers/stats - self.logged_event_sender_absence = false; - - let result = self.download_from_inner(address, position, length).await; - - // send an event indicating that the download process completed with an error - if result.is_err() { - self.send_event(FilesDownloadEvent::Error).await?; - } - - // drop the sender to close the channel. - let sender = self.event_sender.take(); - drop(sender); - - result - } - - pub async fn download_from_inner( - &mut self, - address: ChunkAddress, - position: usize, - length: usize, - ) -> Result { - debug!("Reading {length} bytes at: {address:?}, starting from position: {position}"); - let chunk = self - .api - .client - .get_chunk(address, false, Some(self.retry_strategy)) - .await?; - - // First try to deserialize a LargeFile, if it works, we go and seek it. - // If an error occurs, we consider it to be a SmallFile. 
- if let Ok(data_map) = self.unpack_chunk(chunk.clone()).await { - let info = self_encryption::seek_info(data_map.file_size(), position, length); - let range = &info.index_range; - let all_infos = data_map.infos(); - - let to_download = (range.start..range.end + 1) - .clone() - .map(|i| all_infos[i].clone()) - .collect_vec(); - let to_download = DataMap::new(to_download); - - // not written to file and return the encrypted chunks - if let DownloadReturnType::EncryptedChunks(encrypted_chunks) = - self.read(to_download, None, true, false).await? - { - let bytes = self_encryption::decrypt_range( - &data_map, - &encrypted_chunks, - info.relative_pos, - length, - ) - .map_err(ChunksError::SelfEncryption)?; - return Ok(bytes); - } else { - error!("IncorrectDownloadOption: expected to get the encrypted chunks back"); - return Err(ClientError::IncorrectDownloadOption); - } - } - - // The error above is ignored to avoid leaking the storage format detail of SmallFiles and LargeFiles. - // The basic idea is that we're trying to deserialize as one, and then the other. - // The cost of it is that some errors will not be seen without a refactor. - let mut bytes = chunk.value().clone(); - - let _ = bytes.split_to(position); - bytes.truncate(length); - - Ok(bytes) - } - - /// Download a file from the network and get the decrypted bytes. - /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address. - pub async fn download_file( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - ) -> Result { - if let Some(bytes) = self - .download_entire_file(address, data_map_chunk, None) - .await? - { - Ok(bytes) - } else { - error!("IncorrectDownloadOption: expected to get decrypted bytes, but we got None"); - Err(ClientError::IncorrectDownloadOption) - } - } - - /// Download a file from the network and write it to the provided path. 
- /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address. - pub async fn download_file_to_path( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - path: PathBuf, - ) -> Result<()> { - if self - .download_entire_file(address, data_map_chunk, Some(path)) - .await? - .is_none() - { - Ok(()) - } else { - error!( - "IncorrectDownloadOption: expected to not get any decrypted bytes, but got Some" - ); - Err(ClientError::IncorrectDownloadOption) - } - } - - /// Download a file from the network. - /// If you want to track the download progress, use the `get_events` method. - async fn download_entire_file( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - downloaded_file_path: Option, - ) -> Result> { - // clean up the trackers/stats - self.logged_event_sender_absence = false; - - let result = self - .download_entire_file_inner(address, data_map_chunk, downloaded_file_path) - .await; - - // send an event indicating that the download process completed with an error - if result.is_err() { - self.send_event(FilesDownloadEvent::Error).await?; - } - - // drop the sender to close the channel. 
- let sender = self.event_sender.take(); - drop(sender); - - result - } - - async fn download_entire_file_inner( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - downloaded_file_path: Option, - ) -> Result> { - let head_chunk = if let Some(chunk) = data_map_chunk { - info!("Downloading via supplied local datamap"); - chunk - } else { - match self - .api - .client - .get_chunk(address, self.show_holders, Some(self.retry_strategy)) - .await - { - Ok(chunk) => chunk, - Err(err) => { - error!("Failed to fetch head chunk {address:?}"); - return Err(err); - } - } - }; - - // first try to deserialize a LargeFile, if it works, we go and seek it - match self.unpack_chunk(head_chunk.clone()).await { - Ok(data_map) => { - // read_all emits - match self - .read(data_map, downloaded_file_path, false, false) - .await? - { - DownloadReturnType::EncryptedChunks(_) => { - error!("IncorrectDownloadOption: we should not be getting the encrypted chunks back as it is set to false."); - Err(ClientError::IncorrectDownloadOption) - } - DownloadReturnType::DecryptedBytes(bytes) => Ok(Some(bytes)), - DownloadReturnType::WrittenToFileSystem => Ok(None), - } - } - Err(ClientError::Chunks(ChunksError::Deserialisation(_))) => { - // Only in case of a deserialisation error, - // shall consider the head chunk to be a SmallFile. - // With the min-size now set to 3 Bytes, such case shall be rare. - // Hence raise a warning for it. 
- warn!("Consider head chunk {address:?} as an SmallFile"); - println!("Consider head chunk {address:?} as an SmallFile"); - - self.send_event(FilesDownloadEvent::ChunksCount(1)).await?; - self.send_event(FilesDownloadEvent::Downloaded(address)) - .await?; - if let Some(path) = downloaded_file_path { - fs::write(path, head_chunk.value().clone())?; - Ok(None) - } else { - Ok(Some(head_chunk.value().clone())) - } - } - Err(err) => { - // For large data_map that consists of multiple chunks, - // `unpack_chunk` function will try to fetch those chunks from network. - // During the process, any chunk could be failed to download, - // hence trigger an error to be raised. - error!("Encounter error when unpack head_chunk {address:?} : {err:?}"); - println!("Encounter error when unpack head_chunk {address:?} : {err:?}"); - Err(err) - } - } - } - - /// The internal logic to download the provided chunks inside the datamap. - /// If the decrypted_file_path is provided, we return DownloadReturnType::WrittenToFileSystem - /// If return_encrypted_chunks is true, we return DownloadReturnType::EncryptedChunks - /// Else we return DownloadReturnType::DecryptedBytes - /// - /// Set we_are_downloading_a_datamap if we want to emit the DatamapCount else we emit ChunksCount - async fn read( - &mut self, - data_map: DataMap, - decrypted_file_path: Option, - return_encrypted_chunks: bool, - we_are_downloading_a_datamap: bool, - ) -> Result { - // used internally - enum DownloadKind { - FileSystem(StreamSelfDecryptor), - Memory(Vec), - } - - let mut download_kind = { - if let Some(path) = decrypted_file_path { - DownloadKind::FileSystem(StreamSelfDecryptor::decrypt_to_file(path, &data_map)?) 
- } else { - DownloadKind::Memory(Vec::new()) - } - }; - let chunk_infos = data_map.infos(); - let expected_count = chunk_infos.len(); - - if we_are_downloading_a_datamap { - self.send_event(FilesDownloadEvent::ChunksCount(expected_count)) - .await?; - } else { - // we're downloading the chunks related to a huge datamap - self.send_event(FilesDownloadEvent::DatamapCount(expected_count)) - .await?; - } - - let now = Instant::now(); - - let client_clone = self.api.client.clone(); - let show_holders = self.show_holders; - let retry_strategy = self.retry_strategy; - // the initial index is not always 0 as we might seek a range of bytes. So fetch the first index - let mut current_index = chunk_infos - .first() - .ok_or_else(|| ClientError::EmptyDataMap)? - .index; - let mut stream = futures::stream::iter(chunk_infos.into_iter()) - .map(|chunk_info| { - Self::get_chunk( - client_clone.clone(), - chunk_info.dst_hash, - chunk_info.index, - show_holders, - retry_strategy, - ) - }) - .buffer_unordered(self.batch_size); - - let mut chunk_download_cache = HashMap::new(); - - while let Some(result) = stream.next().await { - let (chunk_address, index, encrypted_chunk) = result?; - // notify about the download - self.send_event(FilesDownloadEvent::Downloaded(chunk_address)) - .await?; - info!("Downloaded chunk of index {index:?}. We are at current_index {current_index:?}"); - - // check if current_index is present in the cache before comparing the fetched index. - // try to keep removing from the cache until we run out of sequential chunks to insert. - while let Some(encrypted_chunk) = chunk_download_cache.remove(¤t_index) { - debug!("Got current_index {current_index:?} from the download cache. 
Incrementing current index"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } - // now check if we can process the fetched index, else cache it. - if index == current_index { - debug!("The downloaded chunk's index {index:?} matches the current index {current_index}. Processing it"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } else { - // since we download the chunks concurrently without order, we cache the results for an index that - // finished earlier - debug!("The downloaded chunk's index {index:?} does not match with the current_index {current_index}. Inserting into cache"); - let _ = chunk_download_cache.insert(index, encrypted_chunk); - } - } - - // finally empty out the cache. - debug!("Finally emptying out the download cache"); - while let Some(encrypted_chunk) = chunk_download_cache.remove(¤t_index) { - debug!("Got current_index {current_index:?} from the download cache. Incrementing current index"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } - if !chunk_download_cache.is_empty() { - error!( - "The chunk download cache is not empty. Current index {current_index:?}. 
The indices inside the cache: {:?}", - chunk_download_cache.keys() - ); - return Err(ClientError::FailedToAssembleDownloadedChunks); - } - - let elapsed = now.elapsed(); - info!("Client downloaded file in {elapsed:?}"); - - match download_kind { - DownloadKind::FileSystem(_) => Ok(DownloadReturnType::WrittenToFileSystem), - DownloadKind::Memory(collector) => { - let result = if return_encrypted_chunks { - DownloadReturnType::EncryptedChunks(collector) - } else { - let bytes = decrypt_full_set(&data_map, &collector) - .map_err(ChunksError::SelfEncryption)?; - DownloadReturnType::DecryptedBytes(bytes) - }; - - Ok(result) - } - } - } - - /// Extracts a file DataMapLevel from a chunk. - /// If the DataMapLevel is not the first level mapping directly to the user's contents, - /// the process repeats itself until it obtains the first level DataMapLevel. - pub async fn unpack_chunk(&mut self, mut chunk: Chunk) -> Result { - loop { - match rmp_serde::from_slice(chunk.value()).map_err(ChunksError::Deserialisation)? { - DataMapLevel::First(data_map) => { - return Ok(data_map); - } - DataMapLevel::Additional(data_map) => { - if let DownloadReturnType::DecryptedBytes(serialized_chunk) = - self.read(data_map, None, false, true).await? - { - chunk = rmp_serde::from_slice(&serialized_chunk) - .map_err(ChunksError::Deserialisation)?; - } else { - error!("IncorrectDownloadOption: we should be getting the decrypted bytes back."); - return Err(ClientError::IncorrectDownloadOption); - } - } - } - } - } - - async fn send_event(&mut self, event: FilesDownloadEvent) -> Result<()> { - if let Some(sender) = self.event_sender.as_ref() { - sender.send(event).await.map_err(|err| { - error!("Could not send files download event due to {err:?}"); - ClientError::CouldNotSendFilesEvent - })?; - } else if !self.logged_event_sender_absence { - info!("Files download event sender is not set. 
Use get_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - Ok(()) - } - - async fn get_chunk( - client: Client, - address: XorName, - index: usize, - show_holders: bool, - retry_strategy: RetryStrategy, - ) -> std::result::Result<(ChunkAddress, usize, EncryptedChunk), ChunksError> { - let chunk = client - .get_chunk( - ChunkAddress::new(address), - show_holders, - Some(retry_strategy), - ) - .await - .map_err(|err| { - error!("Chunk missing {address:?} with {err:?}",); - ChunksError::ChunkMissing(address) - })?; - let encrypted_chunk = EncryptedChunk { - index, - content: chunk.value, - }; - Ok((chunk.address, index, encrypted_chunk)) - } -} diff --git a/sn_client/src/folders.rs b/sn_client/src/folders.rs deleted file mode 100644 index e2c94ef929..0000000000 --- a/sn_client/src/folders.rs +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{error::Result, Client, ClientRegister, WalletClient}; -use crate::{acc_packet::load_account_wallet_or_create_with_mnemonic, Error, FilesApi, UploadCfg}; -use bls::{Ciphertext, PublicKey}; -use bytes::{BufMut, BytesMut}; -use self_encryption::MAX_CHUNK_SIZE; -use serde::{Deserialize, Serialize}; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RegisterAddress}, - NetworkAddress, -}; -use sn_registers::{Entry, EntryHash}; - -use std::{ - collections::{BTreeMap, BTreeSet}, - ffi::OsString, - path::{Path, PathBuf}, -}; -use xor_name::{XorName, XOR_NAME_LEN}; - -/// Folder Entry representing either a file or subfolder. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum FolderEntry { - File(Chunk), - Folder(RegisterAddress), -} - -/// Metadata to be stored on a Chunk, linked from and belonging to Registers' entries. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Metadata { - pub name: String, - pub content: FolderEntry, -} - -// This is the entry value used in Folders to mark a removed file/folder. -const REMOVED_ENTRY_MARK: XorName = XorName([0; XOR_NAME_LEN]); - -/// Folders APIs. -#[derive(Clone)] -pub struct FoldersApi { - client: Client, - wallet_dir: PathBuf, - register: ClientRegister, - files_api: FilesApi, - // Cache of metadata chunks. We keep the Chunk itself till we upload it to the network. - metadata: BTreeMap)>, -} - -impl FoldersApi { - /// Create FoldersApi instance. - pub fn new( - client: Client, - wallet_dir: &Path, - address: Option, - ) -> Result { - let register = if let Some(addr) = address { - ClientRegister::create_with_addr(client.clone(), addr) - } else { - let mut rng = rand::thread_rng(); - ClientRegister::create(client.clone(), XorName::random(&mut rng)) - }; - - Self::create(client, wallet_dir, register) - } - - /// Clones the register instance. Any change made to one instance will not be reflected on the other register. 
- pub fn register(&self) -> ClientRegister { - self.register.clone() - } - - /// Return the address of the Folder (Register address) on the network - pub fn address(&self) -> &RegisterAddress { - self.register.address() - } - - /// Return the address of the Folder (Register address) as a NetworkAddress - pub fn as_net_addr(&self) -> NetworkAddress { - NetworkAddress::RegisterAddress(*self.address()) - } - - /// Return the list of metadata chunks addresses that need to be payed for in order to be - /// able to then store all data on the network upon calling `sync` method. - pub fn meta_addrs_to_pay(&self) -> BTreeSet { - self.metadata - .iter() - .filter_map(|(meta_xorname, (_, chunk))| { - chunk - .as_ref() - .map(|_| NetworkAddress::ChunkAddress(ChunkAddress::new(*meta_xorname))) - }) - .collect() - } - - /// Return the list of metadata chunks. - pub fn meta_chunks(&self) -> BTreeSet { - self.metadata - .iter() - .filter_map(|(_, (_, chunk))| chunk.clone()) - .collect() - } - - /// Create a new WalletClient from the directory set. - pub fn wallet(&self) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?; - Ok(WalletClient::new(self.client.clone(), wallet)) - } - - /// Add provided file as entry of this Folder (locally). - /// The new file's metadata chunk will be encrypted if a key has been provided. - pub fn add_file( - &mut self, - file_name: OsString, - data_map_chunk: Chunk, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: file_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::File(data_map_chunk), - }; - - self.add_entry(metadata, &BTreeSet::default(), encryption_pk) - } - - /// Add subfolder as entry of this Folder (locally). - /// The new folder's metadata chunk will be encrypted if a key has been provided. 
- pub fn add_folder( - &mut self, - folder_name: OsString, - address: RegisterAddress, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: folder_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::Folder(address), - }; - - self.add_entry(metadata, &BTreeSet::default(), encryption_pk) - } - - /// Replace an existing file with the provided one (locally). - /// The new file's metadata chunk will be encrypted if a key has been provided. - pub fn replace_file( - &mut self, - existing_entry: EntryHash, - file_name: OsString, - data_map_chunk: Chunk, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: file_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::File(data_map_chunk), - }; - - self.add_entry( - metadata, - &vec![existing_entry].into_iter().collect(), - encryption_pk, - ) - } - - /// Remove a file/folder item from this Folder (locally). - pub fn remove_item(&mut self, existing_entry: EntryHash) -> Result<()> { - let _ = self.register.write_atop( - &REMOVED_ENTRY_MARK, - &vec![existing_entry].into_iter().collect(), - )?; - Ok(()) - } - - /// Sync local Folder with the network. 
- pub async fn sync(&mut self, upload_cfg: UploadCfg) -> Result<()> { - let mut wallet_client = self.wallet()?; - - // First upload any newly created metadata chunk - for (_, meta_chunk) in self.metadata.values_mut() { - if let Some(chunk) = meta_chunk.take() { - self.files_api - .get_local_payment_and_upload_chunk( - chunk.clone(), - upload_cfg.verify_store, - Some(upload_cfg.retry_strategy), - ) - .await?; - } - } - - let payment_info = wallet_client.get_recent_payment_for_addr(&self.as_net_addr())?; - - self.register - .sync( - &mut wallet_client, - upload_cfg.verify_store, - Some(payment_info), - ) - .await?; - - Ok(()) - } - - /// Download a copy of the Folder from the network. - pub async fn retrieve( - client: Client, - wallet_dir: &Path, - address: RegisterAddress, - ) -> Result { - let register = ClientRegister::retrieve(client.clone(), address).await?; - Self::create(client, wallet_dir, register) - } - - /// Returns true if there is a file/folder which matches the given entry hash - pub fn contains(&self, entry_hash: &EntryHash) -> bool { - self.register - .read() - .iter() - .any(|(hash, _)| hash == entry_hash) - } - - /// Find file/folder in this Folder by its name, returning metadata chunk xorname and metadata itself. - pub fn find_by_name(&self, name: &str) -> Option<(&XorName, &Metadata)> { - // let's get the list of metadata xornames of non-removed entries - let non_removed_items: BTreeSet = self - .register - .read() - .iter() - .map(|(_, meta_xorname_entry)| xorname_from_entry(meta_xorname_entry)) - .collect(); - - self.metadata - .iter() - .find_map(|(meta_xorname, (metadata, _))| { - if metadata.name == name && non_removed_items.contains(meta_xorname) { - Some((meta_xorname, metadata)) - } else { - None - } - }) - } - - /// Returns the list of entries of this Folder, including their entry hash, - /// metadata chunk xorname, and metadata itself. 
- pub async fn entries(&mut self) -> Result> { - let mut entries = BTreeMap::new(); - for (entry_hash, entry) in self.register.read() { - let meta_xorname = xorname_from_entry(&entry); - if meta_xorname == REMOVED_ENTRY_MARK { - continue; - } - - let metadata = match self.metadata.get(&meta_xorname) { - Some((metadata, _)) => metadata.clone(), - None => { - // retrieve metadata Chunk from network - let chunk = self - .client - .get_chunk(ChunkAddress::new(meta_xorname), false, None) - .await?; - - // let's first assume it's unencrypted - let metadata: Metadata = match rmp_serde::from_slice(chunk.value()) { - Ok(metadata) => metadata, - Err(err) => { - // let's try to decrypt it then - let cipher = Ciphertext::from_bytes(chunk.value()).map_err(|_| err)?; - let data = self - .client - .signer() - .decrypt(&cipher) - .ok_or(Error::FolderEntryDecryption(entry_hash))?; - - // if this fails, it's either the wrong key or unexpected data - rmp_serde::from_slice(&data) - .map_err(|_| Error::FolderEntryDecryption(entry_hash))? - } - }; - self.metadata.insert(meta_xorname, (metadata.clone(), None)); - metadata - } - }; - entries.insert(entry_hash, (meta_xorname, metadata)); - } - Ok(entries) - } - - // Private helpers - - // Create a new FoldersApi instance with given register. - fn create(client: Client, wallet_dir: &Path, register: ClientRegister) -> Result { - let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); - - Ok(Self { - client, - wallet_dir: wallet_dir.to_path_buf(), - register, - files_api, - metadata: BTreeMap::new(), - }) - } - - // Add the given entry to the underlying Register as well as creating the metadata Chunk. - // If an encryption key is given, the metadata chunk will be encrpyted with it. 
- fn add_entry( - &mut self, - metadata: Metadata, - children: &BTreeSet, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE); - let serialised_metadata = rmp_serde::to_vec(&metadata)?; - if let Some(pk) = encryption_pk { - bytes.put( - pk.encrypt(serialised_metadata.as_slice()) - .to_bytes() - .as_slice(), - ); - } else { - bytes.put(serialised_metadata.as_slice()); - } - let meta_chunk = Chunk::new(bytes.freeze()); - let meta_xorname = *meta_chunk.name(); - - self.metadata - .insert(meta_xorname, (metadata.clone(), Some(meta_chunk))); - let entry_hash = self.register.write_atop(&meta_xorname, children)?; - - Ok((entry_hash, meta_xorname, metadata)) - } -} - -// Helper to convert a Register/Folder entry into a XorName -fn xorname_from_entry(entry: &Entry) -> XorName { - let mut xorname = [0; XOR_NAME_LEN]; - xorname.copy_from_slice(entry); - XorName(xorname) -} diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs deleted file mode 100644 index 3af4d517b3..0000000000 --- a/sn_client/src/lib.rs +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -//! > **Core functionalities for interacting with the SAFE Network** -//! -//! The `sn_client` crate is a part of the [Safe Network](https://safenetwork.tech/) (SN), -//! and plays a crucial role in this ecosystem by serving as the client library that allows -//! 
applications and users to interact with the Safe Network, and build applications that -//! leverage the Safe Network's capabilities, providing a high-level API that simplifies the development process. -//! -//! Here are the key functionalities provided by this crate: -//! -//! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to -//! send and receive messages from the decentralized nodes that make up the network. -//! -//! 2. **Data Storage and Retrieval**: to store and retrieve data on the Safe Network. -//! This includes both private and public data, ensuring privacy and security. -//! -//! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and -//! managing access to data, ensuring that only authorized users can access sensitive information. -//! -//! 4. **File Management**: The crate supports operations related to file management, such as uploading, -//! downloading, and managing files and directories on the Safe Network. -//! -//! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be -//! used for various purposes within the network, including paying for storage and services. -//! -//! ## Quick links -//! - [Crates.io](https://crates.io/crates/sn_client) -//! - [Forum](https://forum.autonomi.community/) -//! - [Issues on GitHub](https://github.com/maidsafe/safe_network/issues) -//! 
- -#[macro_use] -extern crate tracing; - -pub mod acc_packet; -pub mod api; -mod audit; -mod chunks; -mod error; -mod event; -mod faucet; -mod files; -mod folders; -mod register; -mod uploader; -mod wallet; - -/// Test utils -#[cfg(feature = "test-utils")] -pub mod test_utils; - -// re-export used crates to make them available to app builders -// this ensures the version of the crates used by the app builders are the same as the ones used by the client -// so they don't run into issues with incompatible types due to different versions of the same crate -pub use sn_networking as networking; -pub use sn_protocol as protocol; -pub use sn_registers as registers; -pub use sn_transfers as transfers; - -const MAX_CONCURRENT_TASKS: usize = 4096; - -pub use self::{ - audit::{DagError, SpendDag, SpendDagGet, SpendFault}, - error::Error, - event::{ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}, - faucet::fund_faucet_from_genesis_wallet, - files::{ - download::{FilesDownload, FilesDownloadEvent}, - FilesApi, BATCH_SIZE, - }, - folders::{FolderEntry, FoldersApi, Metadata}, - register::ClientRegister, - uploader::{UploadCfg, UploadEvent, UploadSummary, Uploader}, - wallet::{send, StoragePaymentResult, WalletClient}, -}; -pub(crate) use error::Result; - -use sn_networking::Network; -use std::sync::Arc; - -#[cfg(target_arch = "wasm32")] -use console_error_panic_hook; -#[cfg(target_arch = "wasm32")] -use wasm_bindgen::prelude::*; -#[cfg(target_arch = "wasm32")] -use web_sys::console; - -// This is like the `main` function, except for JavaScript. -#[cfg(target_arch = "wasm32")] -#[wasm_bindgen(start)] -pub async fn main_js() -> std::result::Result<(), JsValue> { - // This provides better error messages in debug mode. - // It's disabled in release mode so it doesn't bloat up the file size. 
- // #[cfg(debug_assertions)] - console_error_panic_hook::set_once(); - - console::log_1(&JsValue::from_str("Hello safe world!")); - - // Tracing - // TODO: dont log _everything_ - // right now it logs all libp2p entirely. - tracing_wasm::set_as_global_default(); - - Ok(()) -} - -/// A quick client that only takes some peers to connect to -#[wasm_bindgen] -#[cfg(target_arch = "wasm32")] -pub async fn get_data(peer: &str, data_address: &str) -> std::result::Result<(), JsError> { - let bytes = hex::decode(&data_address).expect("Input address is not a hex string"); - let xor_name = xor_name::XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - - use sn_protocol::storage::ChunkAddress; - console::log_1(&JsValue::from_str(peer)); - - let the_peer = sn_peers_acquisition::parse_peer_addr(peer)?; - - console::log_1(&JsValue::from_str(&format!( - "Provided Peer was {the_peer:?}" - ))); - - // TODO: We need to tidy this up, the client loops forever in the browser, and eventually crashes - // it does _do things_ but errors surface, and even after getting data, it continues... - let client = Client::quick_start(Some(vec![the_peer])) - .await - .map_err(|e| JsError::new(&format!("Client could not start: {e:?}")))?; - - console::log_1(&JsValue::from_str("Client started {chunk:?}")); - - let chunk = client - .get_chunk(ChunkAddress::new(xor_name), false, None) - .await - .map_err(|e| JsError::new(&format!("Client get data failed: {e:?}")))?; - - console::log_1(&JsValue::from_str(&format!("Data found {chunk:?}"))); - - Ok(()) -} - -/// Client API implementation to store and get data. -#[derive(Clone)] -pub struct Client { - network: Network, - events_broadcaster: ClientEventsBroadcaster, - signer: Arc, -} diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs deleted file mode 100644 index a19674aca4..0000000000 --- a/sn_client/src/register.rs +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::StoragePaymentResult, Client, Error, Result, WalletClient}; -use bls::PublicKey; -use crdts::merkle_reg::MerkleReg; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; -use sn_protocol::{ - error::Error as ProtocolError, - messages::RegisterCmd, - storage::{try_serialize_record, RecordKind, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{Entry, EntryHash, Permissions, Register, RegisterAddress, SignedRegister}; -use sn_transfers::{NanoTokens, Payment}; -use std::collections::{BTreeSet, HashSet, LinkedList}; -use xor_name::XorName; - -/// Cached operations made to an offline Register instance are applied locally only, -/// and accumulated until the user explicitly calls 'sync'. The user can -/// switch back to sync with the network for every op by invoking `online` API. -#[derive(Clone, custom_debug::Debug)] -pub struct ClientRegister { - #[debug(skip)] - client: Client, - pub register: Register, - pub ops: LinkedList, // Cached operations. -} - -impl ClientRegister { - fn create_register(client: Client, meta: XorName, perms: Permissions) -> Self { - let public_key = client.signer_pk(); - - let register = Register::new(public_key, meta, perms); - Self { - client, - register, - ops: LinkedList::new(), - } - } - - /// Create a new Register Locally. 
- /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create(client: Client, meta: XorName) -> Self { - Self::create_register(client, meta, Permissions::default()) - } - - /// Create a new Register locally with a specific address. - /// # Arguments - /// * 'client' - [Client] - /// * 'addr' - [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use sn_protocol::storage::RegisterAddress; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = RegisterAddress::new(XorName::random(&mut rng), client.signer_pk()); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create_with_addr(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create_with_addr(client: Client, addr: RegisterAddress) -> Self { - let register = Register::new(addr.owner(), addr.meta(), Permissions::default()); - Self { - client, - register, - ops: LinkedList::new(), - } - } - - /// Create a new Register and send it to the Network. - /// - /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// * 'wallet_client' - A borrowed mutable [WalletClient] - /// * `verify_store` - A boolean to verify store. 
Set this to true for mandatory verification. - /// * 'perms' - [Permissions] - /// - /// Return type: Result<(Self, [NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a new Register replica from a predefined address. - /// // The create_online function runs a [sync](ClientRegister::sync) internally. - /// let (client_register, mut total_cost, mut total_royalties) = ClientRegister::create_online( - /// client, - /// address, - /// &mut wallet_client, - /// false, - /// permissions, - /// ).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create_online( - client: Client, - meta: XorName, - wallet_client: &mut WalletClient, - verify_store: bool, - perms: Permissions, - ) -> Result<(Self, NanoTokens, NanoTokens)> { - let mut reg = Self::create_register(client, meta, perms); - let (storage_cost, royalties_fees) = reg.sync(wallet_client, verify_store, None).await?; - Ok((reg, storage_cost, royalties_fees)) - } - - /// Retrieve a Register from the network to work on it offline. 
- pub(super) async fn retrieve(client: Client, address: RegisterAddress) -> Result { - let register = Self::get_register_from_network(&client, address).await?; - - Ok(Self { - client, - register, - ops: LinkedList::new(), - }) - } - - /// Return type: [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the address. In this example, we print it out: - /// println!("REGISTER_ADDRESS={}", client_register.address().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn address(&self) -> &RegisterAddress { - self.register.address() - } - - /// Returns the Owner of the Register. 
- /// - /// Return type: [PublicKey] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the owner. In this example, we print it out: - /// println!("REGISTER_OWNER={}", client_register.owner().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn owner(&self) -> PublicKey { - self.register.owner() - } - - /// Returns the Permissions of the Register. 
- /// - /// Return type: [Permissions] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the permissions. In this example, we print it out: - /// let permissions = client_register.permissions(); - /// println!("REGISTER_PERMS={:?}",permissions); - /// # Ok(()) - /// # } - /// ``` - pub fn permissions(&self) -> &Permissions { - self.register.permissions() - } - - /// Return the number of items held in the register. 
- /// - /// Return type: u64 - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can see the size. In this example, we print it out: - /// println!("REGISTER_SIZE={}", client_register.size()); - /// # Ok(()) - /// # } - /// ``` - pub fn size(&self) -> u64 { - self.register.size() - } - - /// Return a value corresponding to the provided 'hash', if present. - // No usages found in All Places - pub fn get(&self, hash: EntryHash) -> Result<&Entry> { - let entry = self.register.get(hash)?; - Ok(entry) - } - - /// Read the last entry, or entries when there are branches, if the register is not empty. 
- /// - /// Return type: [BTreeSet]<([EntryHash], [Entry])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Read as bytes into the ClientRegister instance - /// let register = ClientRegister::create(client.clone(), address).read(); - /// # Ok(()) - /// # } - /// ``` - pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { - self.register.read() - } - - /// Write a new value onto the Register atop latest value. - /// It returns an error if it finds branches in the content/entries; if it is - /// required to merge/resolve the branches, invoke the `write_merging_branches` API. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "Register entry"; - /// // Write as bytes into the ClientRegister instance - /// let mut register = ClientRegister::create(client.clone(), address).write(entry.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write(&mut self, entry: &[u8]) -> Result { - let children = self.register.read(); - if children.len() > 1 { - return Err(Error::ContentBranchDetected(children)); - } - - self.write_atop(entry, &children.into_iter().map(|(hash, _)| hash).collect()) - } - - /// Write a new value onto the Register atop of the latest value. 
- /// If there are any branches of content or entries, it automatically merges them. - /// Leaving the new value as a single latest value on the Register. - /// Note you can use the `write` API if you need to handle - /// content/entries branches in a different way. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "entry_input_here"; - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let message = "Register entry"; - /// let register = mutable_register.write_merging_branches(message.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result { - let children: BTreeSet = self - .register - .read() - .into_iter() - .map(|(hash, _)| hash) - .collect(); - - self.write_atop(entry, &children) - } - - /// Write a new value onto the Register atop the set of branches/entries - /// referenced by the provided list of their corresponding entry hash. - /// Note you can use `write_merging_branches` API instead if you - /// want to write atop all exiting branches/entries. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'children' - [BTreeSet]<[EntryHash]> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let meta = "Register entry".as_bytes(); - /// let register = mutable_register.write_atop(meta, &BTreeSet::default()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_atop( - &mut self, - entry: &[u8], - children: &BTreeSet, - ) -> Result { - // check permissions first - let public_key = self.client.signer_pk(); - self.register.check_user_permissions(public_key)?; - - let (entry_hash, op) = self - .register - .write(entry.into(), children, self.client.signer())?; - let cmd = RegisterCmd::Edit(op); - - self.ops.push_front(cmd); - - Ok(entry_hash) - } - - // ********* Online methods ********* - - /// Sync this Register with the replicas on the network. - /// This will optionally verify the stored Register on the network is the same as the local one. - /// If payment info is provided it won't try to make the payment. 
- /// - /// # Arguments - /// * 'wallet_client' - WalletClient - /// * 'verify_store' - Boolean - /// - /// Return type: - /// Result<([NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// // Run sync of a Client Register instance - /// let mut register = - /// ClientRegister::create(client, address).sync(&mut wallet_client, true, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn sync( - &mut self, - wallet_client: &mut WalletClient, - verify_store: bool, - mut payment_info: Option<(Payment, PeerId)>, - ) -> Result<(NanoTokens, NanoTokens)> { - let addr = *self.address(); - debug!("Syncing Register at {addr:?}!"); - let mut storage_cost = NanoTokens::zero(); - let mut royalties_fees = NanoTokens::zero(); - let reg_result = if verify_store { - debug!("VERIFYING REGISTER STORED {:?}", self.address()); - - let res = if payment_info.is_some() { - // we expect this to be a _fresh_ register. - // It still could have been PUT previously, but we'll do a quick verification - // instead of thorough one. 
- self.client - .quickly_check_if_register_stored(*self.address()) - .await - } else { - self.client.verify_register_stored(*self.address()).await - }; - - // we need to keep the error here if verifying, so we can retry and pay for storage - // once more below - match res { - Ok(r) => Ok(r.register()?), - Err(error) => Err(error), - } - } else { - Self::get_register_from_network(&self.client, addr).await - }; - let remote_replica = match reg_result { - Ok(r) => r, - // any error here will result in a repayment of the register - // TODO: be smart about this and only pay for storage if we need to - Err(err) => { - debug!("Failed to get register: {err:?}"); - debug!("Creating Register as it doesn't exist at {addr:?}!"); - let cmd = RegisterCmd::Create { - register: self.register.clone(), - signature: self.client.sign(self.register.bytes()?), - }; - - // Let's check if the user has already paid for this address first - if payment_info.is_none() { - let net_addr = NetworkAddress::RegisterAddress(addr); - let payment_result = self.make_payment(wallet_client, &net_addr).await?; - storage_cost = payment_result.storage_cost; - royalties_fees = payment_result.royalty_fees; - - // Get payment proofs needed to publish the Register - let (payment, payee) = wallet_client.get_recent_payment_for_addr(&net_addr)?; - debug!("payments found: {payment:?}"); - payment_info = Some((payment, payee)); - } - - Self::publish_register(self.client.clone(), cmd, payment_info, verify_store) - .await?; - self.register.clone() - } - }; - self.register.merge(&remote_replica)?; - self.push(verify_store).await?; - - Ok((storage_cost, royalties_fees)) - } - - /// Push all operations made locally to the replicas of this Register on the network. - /// This optionally verifies that the stored Register is the same as our local register. 
- /// - /// # Arguments - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Pass the boolean value to the Client Register instance via .Push() - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.push(false); - /// # Ok(()) - /// # } - /// ``` - pub async fn push(&mut self, verify_store: bool) -> Result<()> { - let ops_len = self.ops.len(); - if ops_len > 0 { - let address = *self.address(); - debug!("Pushing {ops_len} cached Register cmds at {address}!"); - - // TODO: send them all concurrently - while let Some(cmd) = self.ops.pop_back() { - // We don't need to send the payment proofs here since - // these are all Register mutation cmds which don't require payment. - let result = - Self::publish_register(self.client.clone(), cmd.clone(), None, verify_store) - .await; - - if let Err(err) = result { - warn!("Did not push Register cmd on all nodes in the close group!: {err}"); - // We keep the cmd for next sync to retry - self.ops.push_back(cmd); - return Err(err); - } - } - - debug!("Successfully pushed {ops_len} Register cmds at {address}!"); - } - - Ok(()) - } - - /// Write a new value onto the Register atop of the latest value. - /// It returns an error if it finds branches in the content / entries. If so, then it's - /// required to merge or resolve the branches. In that case, invoke the `write_merging_branches` API. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Register entry".as_bytes(); - /// // Use of the 'write_online' example: - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_online(&mut self, entry: &[u8], verify_store: bool) -> Result<()> { - self.write(entry)?; - self.push(verify_store).await - } - - /// Write a new value onto the Register atop of the latest value. - /// If there are branches of content/entries, it will automatically merge them. - /// This will leave a single new value as the latest entry into the Register. - /// Note that you can use the `write` API if you need to handle content/entries branches in a different way. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Entry".as_bytes(); - /// // Use of the 'write_merging_branches_online': - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_merging_branches_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_merging_branches_online( - &mut self, - entry: &[u8], - verify_store: bool, - ) -> Result<()> { - self.write_merging_branches(entry)?; - self.push(verify_store).await - } - - /// Write a new value onto the Register atop the set of branches/entries - /// referenced by the provided list to their corresponding entry hash. - /// Note you can use `write_merging_branches` API if you - /// want to write atop of all exiting branches/entries instead. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'children' - [BTreeSet]<[EntryHash]> - /// * 'verify_store' - Boolean - /// - /// Return type: - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// let mut rng = rand::thread_rng(); - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let entry = "Entry".as_bytes(); - /// let tree_set = BTreeSet::new(); - /// // Use of the 'write_atop_online': - /// let mut binding = ClientRegister::create(client, address); - /// let mut register = binding.write_atop_online(entry,&tree_set,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_atop_online( - &mut self, - entry: &[u8], - children: &BTreeSet, - verify_store: bool, - ) -> Result<()> { - self.write_atop(entry, children)?; - self.push(verify_store).await - } - - /// Access the underlying MerkleReg (e.g. 
for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.register.merkle_reg() - } - - // ********* Private helpers ********* - - // Make a storage payment for the provided network address - async fn make_payment( - &self, - wallet_client: &mut WalletClient, - net_addr: &NetworkAddress, - ) -> Result { - // Let's make the storage payment - let payment_result = wallet_client - .pay_for_storage(std::iter::once(net_addr.clone())) - .await?; - let cost = payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - .ok_or(Error::TotalPriceTooHigh)?; - - println!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - info!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - - if let Err(err) = wallet_client.store_local_wallet() { - warn!("Failed to store wallet with cached payment proofs: {err:?}"); - println!("Failed to store wallet with cached payment proofs: {err:?}"); - } else { - println!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - info!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - } - - Ok(payment_result) - } - - /// Publish a `Register` command on the network. - /// If `verify_store` is true, it will verify the Register was stored on the network. - /// Optionally contains the Payment and the PeerId that we paid to. 
- pub(crate) async fn publish_register( - client: Client, - cmd: RegisterCmd, - payment: Option<(Payment, PeerId)>, - verify_store: bool, - ) -> Result<()> { - let cmd_dst = cmd.dst(); - debug!("Querying existing Register for cmd: {cmd_dst:?}"); - let network_reg = client.get_signed_register_from_network(cmd.dst()).await; - - debug!("Publishing Register cmd: {cmd_dst:?}"); - let register = match cmd { - RegisterCmd::Create { - register, - signature, - } => { - if let Ok(existing_reg) = network_reg { - if existing_reg.owner() != register.owner() { - return Err(ProtocolError::RegisterAlreadyClaimed(existing_reg.owner()))?; - } - } - SignedRegister::new(register, signature) - } - RegisterCmd::Edit(op) => { - let mut reg = network_reg?; - reg.add_op(op)?; - reg - } - }; - - let network_address = NetworkAddress::from_register_address(*register.address()); - let key = network_address.to_record_key(); - let (record, payee) = match payment { - Some((payment, payee)) => { - let record = Record { - key: key.clone(), - value: try_serialize_record( - &(payment, ®ister), - RecordKind::RegisterWithPayment, - )? - .to_vec(), - publisher: None, - expires: None, - }; - (record, Some(vec![payee])) - } - None => { - let record = Record { - key: key.clone(), - value: try_serialize_record(®ister, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - (record, None) - } - }; - - let (record_to_verify, expected_holders) = if verify_store { - let expected_holders: HashSet<_> = client - .network - .get_closest_peers(&network_address, true) - .await? 
- .iter() - .cloned() - .collect(); - ( - Some(Record { - key, - value: try_serialize_record(®ister, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }), - expected_holders, - ) - } else { - (None, Default::default()) - }; - - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Quick), - target_record: record_to_verify, - expected_holders, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: payee, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - - // Register edits might exist, so we cannot be sure that just because we get a record back that this should fail - Ok(client.network.put_record(record, &put_cfg).await?) - } - - /// Retrieve a `Register` from the Network. - async fn get_register_from_network( - client: &Client, - address: RegisterAddress, - ) -> Result { - debug!("Retrieving Register from: {address}"); - let reg = client.get_signed_register_from_network(address).await?; - reg.verify_with_address(address)?; - Ok(reg.register()?) - } -} diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs deleted file mode 100644 index bce997d510..0000000000 --- a/sn_client/src/test_utils.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use crate::{ - acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, - send, Client, WalletClient, -}; -use sn_peers_acquisition::parse_peer_addr; -use sn_protocol::{storage::Chunk, NetworkAddress}; -use sn_transfers::{HotWallet, NanoTokens}; - -use bls::SecretKey; -use bytes::Bytes; -use eyre::{bail, Result}; -use rand::distributions::{Distribution, Standard}; -use std::path::Path; -use tokio::{ - sync::Mutex, - time::{Duration, Instant}, -}; -use tracing::{info, warn}; - -/// 100 SNT is the amount `get_funded_wallet` funds the created wallet with. -pub const AMOUNT_TO_FUND_WALLETS: u64 = 100 * 1_000_000_000; - -// The number of times to try to load the faucet wallet -const LOAD_FAUCET_WALLET_RETRIES: usize = 6; - -// mutex to restrict access to faucet wallet from concurrent tests -static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); - -/// Get a new Client for testing -pub async fn get_new_client(owner_sk: SecretKey) -> Result { - let bootstrap_peers = if cfg!(feature = "local-discovery") { - None - } else { - match std::env::var("SAFE_PEERS") { - Ok(str) => match parse_peer_addr(&str) { - Ok(peer) => Some(vec![peer]), - Err(err) => bail!("Can't parse SAFE_PEERS {str:?} with error {err:?}"), - }, - Err(err) => bail!("Can't get env var SAFE_PEERS with error {err:?}"), - } - }; - - println!("Client bootstrap with peer {bootstrap_peers:?}"); - let client = Client::new(owner_sk, bootstrap_peers, None, None).await?; - Ok(client) -} - -/// Generate a Chunk with random bytes -pub fn random_file_chunk() -> Chunk { - let mut rng = rand::thread_rng(); - let random_content: Vec = >::sample_iter(Standard, &mut rng) - .take(100) - .collect(); - Chunk::new(Bytes::from(random_content)) -} - -/// Creates and funds a new hot-wallet at the provided path -pub async fn get_funded_wallet(client: &Client, wallet_dir: &Path) -> Result { - let wallet_balance = NanoTokens::from(AMOUNT_TO_FUND_WALLETS); - let _guard = 
FAUCET_WALLET_MUTEX.lock().await; - let from_faucet_wallet = load_faucet_wallet().await?; - - let mut local_wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None) - .expect("Wallet shall be successfully created."); - - println!("Getting {wallet_balance} tokens from the faucet..."); - info!("Getting {wallet_balance} tokens from the faucet..."); - let tokens = send( - from_faucet_wallet, - wallet_balance, - local_wallet.address(), - client, - true, - ) - .await?; - - println!("Verifying the transfer from faucet..."); - info!("Verifying the transfer from faucet..."); - client.verify_cashnote(&tokens).await?; - local_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(local_wallet.balance(), wallet_balance); - println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - - Ok(local_wallet) -} - -/// Pay the network for the provided list of storage addresses. -pub async fn pay_for_storage( - client: &Client, - wallet_dir: &Path, - addrs2pay: Vec, -) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - let _ = wallet_client.pay_for_storage(addrs2pay.into_iter()).await?; - Ok(()) -} - -async fn load_faucet_wallet() -> Result { - info!("Loading faucet wallet..."); - let now = Instant::now(); - for attempt in 1..LOAD_FAUCET_WALLET_RETRIES + 1 { - let faucet_wallet = create_faucet_account_and_wallet(); - - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - info!("Loaded faucet wallet after {:?}", now.elapsed()); - return Ok(faucet_wallet); - } - tokio::time::sleep(Duration::from_secs(1)).await; - warn!("The faucet wallet is empty. Attempts: {attempt}/{LOAD_FAUCET_WALLET_RETRIES}") - } - bail!("The faucet wallet is empty even after {LOAD_FAUCET_WALLET_RETRIES} retries. Bailing after {:?}. 
Check the faucet_server logs.", now.elapsed()); -} diff --git a/sn_client/src/uploader/mod.rs b/sn_client/src/uploader/mod.rs deleted file mode 100644 index 8b8d6005fa..0000000000 --- a/sn_client/src/uploader/mod.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -#[cfg(test)] -mod tests; -mod upload; - -use self::upload::{start_upload, InnerUploader, MAX_REPAYMENTS_PER_FAILED_ITEM}; -use crate::{Client, ClientRegister, Error, Result, BATCH_SIZE}; -use itertools::Either; -use sn_networking::PayeeQuote; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{Register, RegisterAddress}; -use sn_transfers::{NanoTokens, WalletApi}; -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt::Debug, - path::PathBuf, -}; -use tokio::sync::mpsc; -use xor_name::XorName; - -/// The set of options to pass into the `Uploader` -#[derive(Debug, Clone, Copy)] -pub struct UploadCfg { - pub batch_size: usize, - pub verify_store: bool, - pub show_holders: bool, - pub retry_strategy: RetryStrategy, - pub max_repayments_for_failed_data: usize, // we want people to specify an explicit limit here. 
- pub collect_registers: bool, -} - -impl Default for UploadCfg { - fn default() -> Self { - Self { - batch_size: BATCH_SIZE, - verify_store: true, - show_holders: false, - retry_strategy: RetryStrategy::Balanced, - max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM, - collect_registers: false, - } - } -} - -/// The result of a successful upload. -#[derive(Debug, Clone)] -pub struct UploadSummary { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub final_balance: NanoTokens, - pub uploaded_addresses: BTreeSet, - pub uploaded_registers: BTreeMap, - pub uploaded_count: usize, - pub skipped_count: usize, -} - -impl UploadSummary { - /// Merge two UploadSummary together. - pub fn merge(mut self, other: Self) -> Result { - self.uploaded_addresses.extend(other.uploaded_addresses); - self.uploaded_registers.extend(other.uploaded_registers); - - let summary = Self { - storage_cost: self - .storage_cost - .checked_add(other.storage_cost) - .ok_or(Error::NumericOverflow)?, - royalty_fees: self - .royalty_fees - .checked_add(other.royalty_fees) - .ok_or(Error::NumericOverflow)?, - final_balance: self - .final_balance - .checked_add(other.final_balance) - .ok_or(Error::NumericOverflow)?, - uploaded_addresses: self.uploaded_addresses, - uploaded_registers: self.uploaded_registers, - uploaded_count: self.uploaded_count + other.uploaded_count, - skipped_count: self.skipped_count + other.skipped_count, - }; - Ok(summary) - } -} - -#[derive(Debug, Clone)] -/// The events emitted from the upload process. -pub enum UploadEvent { - /// Uploaded a record to the network. - ChunkUploaded(ChunkAddress), - /// Uploaded a Register to the network. - /// The returned register is just the passed in register. - RegisterUploaded(ClientRegister), - /// - /// The Chunk already exists in the network. No payments were made. - ChunkAlreadyExistsInNetwork(ChunkAddress), - /// The Register already exists in the network. 
The locally register changes were pushed to the network. - /// No payments were made. - /// The returned register contains the remote replica merged with the passed in register. - RegisterUpdated(ClientRegister), - /// Payment for a batch of records has been made. - PaymentMade { - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - /// The upload process has terminated with an error. - // Note: We cannot send the Error enum as it does not implement Clone. So we cannot even do Result if - // we also want to return this error from the function. - Error, -} - -pub struct Uploader { - // Has to be stored as an Option as we have to take ownership of inner during the upload. - inner: Option, -} - -impl Uploader { - /// Start the upload process. - pub async fn start_upload(mut self) -> Result { - let event_sender = self - .inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .event_sender - .clone(); - match start_upload(Box::new(self)).await { - Err(err) => { - if let Some(event_sender) = event_sender { - if let Err(err) = event_sender.send(UploadEvent::Error).await { - error!("Error while emitting event: {err:?}"); - } - } - Err(err) - } - Ok(summary) => Ok(summary), - } - } - - /// Creates a new instance of `Uploader` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - // NOTE: Self has to be constructed only using this method. We expect `Self::inner` is present everywhere. - pub fn new(client: Client, root_dir: PathBuf) -> Self { - Self { - inner: Some(InnerUploader::new(client, root_dir)), - } - } - - /// Update all the configurations by passing the `UploadCfg` struct - pub fn set_upload_cfg(&mut self, cfg: UploadCfg) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. 
- self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_cfg(cfg); - } - - /// Sets the default batch size that determines the number of data that are processed in parallel. - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`. - pub fn set_batch_size(&mut self, batch_size: usize) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_batch_size(batch_size); - } - - /// Sets the option to verify the data after they have been uploaded. - /// - /// By default, this option is set to true. - pub fn set_verify_store(&mut self, verify_store: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_verify_store(verify_store); - } - - /// Sets the option to display the holders that are expected to be holding the data during verification. - /// - /// By default, this option is set to false. - pub fn set_show_holders(&mut self, show_holders: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_show_holders(show_holders); - } - - /// Sets the RetryStrategy to increase the re-try during the GetStoreCost & Upload tasks. - /// This does not affect the retries during the Payment task. Use `set_max_repayments_for_failed_data` to - /// configure the re-payment attempts. - /// - /// By default, this option is set to RetryStrategy::Quick - pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_retry_strategy(retry_strategy); - } - - /// Sets the maximum number of repayments to perform if the initial payment failed. - /// NOTE: This creates an extra Spend and uses the wallet funds. - /// - /// By default, this option is set to 1 retry. 
- pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_max_repayments_for_failed_data(retries); - } - - /// Enables the uploader to return all the registers that were Uploaded or Updated. - /// The registers are emitted through the event channel whenever they're completed, but this returns them - /// through the UploadSummary when the whole upload process completes. - /// - /// By default, this option is set to False - pub fn set_collect_registers(&mut self, collect_registers: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_collect_registers(collect_registers); - } - - /// Returns a receiver for UploadEvent. - /// This method is optional and the upload process can be performed without it. - pub fn get_event_receiver(&mut self) -> mpsc::Receiver { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .get_event_receiver() - } - - /// Insert a list of chunk paths to upload to upload. - pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunk_paths(chunks); - } - - /// Insert a list of chunks to upload to upload. - pub fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunks(chunks); - } - - /// Insert a list of registers to upload. - pub fn insert_register(&mut self, registers: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_register(registers); - } -} - -// ======= Private ======== - -/// An interface to make the testing easier by not interacting with the network. -trait UploaderInterface: Send + Sync { - fn take_inner_uploader(&mut self) -> InnerUploader; - - // Mutable reference is used in tests. 
- fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ); - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ); - - #[expect(clippy::too_many_arguments)] - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ); - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ); - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ); -} - -// Configuration functions are used in tests. So these are defined here and re-used inside `Uploader` -impl InnerUploader { - pub(super) fn set_cfg(&mut self, cfg: UploadCfg) { - self.cfg = cfg; - } - - pub(super) fn set_batch_size(&mut self, batch_size: usize) { - self.cfg.batch_size = batch_size; - } - - pub(super) fn set_verify_store(&mut self, verify_store: bool) { - self.cfg.verify_store = verify_store; - } - - pub(super) fn set_show_holders(&mut self, show_holders: bool) { - self.cfg.show_holders = show_holders; - } - - pub(super) fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.cfg.retry_strategy = retry_strategy; - } - - pub(super) fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.cfg.max_repayments_for_failed_data = retries; - } - - pub(super) fn set_collect_registers(&mut self, collect_registers: bool) { - self.cfg.collect_registers = collect_registers; - } - - pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver { - let (tx, rx) = mpsc::channel(100); - self.event_sender = Some(tx); - rx - } - 
- pub(super) fn insert_chunk_paths( - &mut self, - chunks: impl IntoIterator, - ) { - self.all_upload_items - .extend(chunks.into_iter().map(|(xorname, path)| { - let item = UploadItem::Chunk { - address: ChunkAddress::new(xorname), - chunk: Either::Right(path), - }; - (xorname, item) - })); - } - - pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.all_upload_items - .extend(chunks.into_iter().map(|chunk| { - let xorname = *chunk.name(); - let item = UploadItem::Chunk { - address: *chunk.address(), - chunk: Either::Left(chunk), - }; - (xorname, item) - })); - } - - pub(super) fn insert_register(&mut self, registers: impl IntoIterator) { - self.all_upload_items - .extend(registers.into_iter().map(|reg| { - let address = *reg.address(); - let item = UploadItem::Register { address, reg }; - (address.xorname(), item) - })); - } -} - -#[derive(Debug, Clone)] -enum UploadItem { - Chunk { - address: ChunkAddress, - // Either the actual chunk or the path to the chunk. - chunk: Either, - }, - Register { - address: RegisterAddress, - reg: ClientRegister, - }, -} - -impl UploadItem { - fn address(&self) -> NetworkAddress { - match self { - Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address), - Self::Register { address, .. } => NetworkAddress::from_register_address(*address), - } - } - - fn xorname(&self) -> XorName { - match self { - UploadItem::Chunk { address, .. } => *address.xorname(), - UploadItem::Register { address, .. 
} => address.xorname(), - } - } -} - -#[derive(Debug)] -enum TaskResult { - GetRegisterFromNetworkOk { - remote_register: Register, - }, - GetRegisterFromNetworkErr(XorName), - PushRegisterOk { - updated_register: ClientRegister, - }, - PushRegisterErr(XorName), - GetStoreCostOk { - xorname: XorName, - quote: Box, - }, - GetStoreCostErr { - xorname: XorName, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_reached: bool, - }, - MakePaymentsOk { - paid_xornames: Vec, - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - MakePaymentsErr { - failed_xornames: Vec<(XorName, Box)>, - insufficient_balance: Option<(NanoTokens, NanoTokens)>, - }, - UploadOk(XorName), - UploadErr { - xorname: XorName, - }, -} - -#[derive(Debug, Clone)] -enum GetStoreCostStrategy { - /// Selects the PeerId with the lowest quote - Cheapest, - /// Selects the cheapest PeerId that we have not made payment to. - SelectDifferentPayee, -} diff --git a/sn_client/src/uploader/tests/mod.rs b/sn_client/src/uploader/tests/mod.rs deleted file mode 100644 index 75916bbb97..0000000000 --- a/sn_client/src/uploader/tests/mod.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod setup; - -use crate::{ - uploader::tests::setup::{ - get_dummy_chunk_paths, get_dummy_registers, get_inner_uploader, start_uploading_with_steps, - TestSteps, - }, - Error as ClientError, UploadEvent, -}; -use assert_matches::assert_matches; -use eyre::Result; -use sn_logging::LogBuilder; -use std::collections::VecDeque; -use tempfile::tempdir; - -// ===== HAPPY PATH ======= - -/// 1. Chunk: if cost =0, then chunk is present in the network. -#[tokio::test] -async fn chunk_that_already_exists_in_the_network_should_return_zero_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![TestSteps::GetStoreCostOk { - trigger_zero_cost: true, - assert_select_different_payee: false, - }]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::ChunkAlreadyExistsInNetwork(_)); - Ok(()) -} - -/// 2. Chunk: if cost !=0, then make payment upload to the network. 
-#[tokio::test] -async fn chunk_should_be_paid_for_and_uploaded_if_cost_is_not_zero() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 3. Register: if GET register = ok, then merge and push the register. -#[tokio::test] -async fn register_should_be_merged_and_pushed_if_it_already_exists_in_the_network() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![TestSteps::GetRegisterOk, TestSteps::PushRegisterOk]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::RegisterUpdated { .. }); - Ok(()) -} - -/// 4. 
Register: if Get register = err, then get store cost and upload. -#[tokio::test] -async fn register_should_be_paid_and_uploaded_if_it_does_not_exists() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - // todo: what if cost = 0 even after GetRegister returns error. check that - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== REPAYMENTS ====== - -/// 1. Chunks: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. 
-#[tokio::test] -async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 2. Register: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. 
-#[tokio::test] -async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== ERRORS ======= -/// 1. 
Registers: Multiple PushRegisterErr should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_upload_should_error_out_if_there_are_multiple_push_failures() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterOk, - TestSteps::PushRegisterErr, - TestSteps::PushRegisterErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 2. 
Chunk: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 3. 
Register: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 4. 
Chunk: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 5. 
Register: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -// 6: Chunks + Registers: if the number of repayments exceed a threshold, it should return MaximumRepaymentsReached error. 
-#[tokio::test] -async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - // initial payment done - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // first repayment - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // thus after reaching max repayments, we should error out during get store cost. - TestSteps::GetStoreCostErr { - assert_select_different_payee: true, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::UploadFailedWithMaximumRepaymentsReached { .. }) - ); - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - Ok(()) -} diff --git a/sn_client/src/uploader/tests/setup.rs b/sn_client/src/uploader/tests/setup.rs deleted file mode 100644 index 328489c24d..0000000000 --- a/sn_client/src/uploader/tests/setup.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - uploader::{ - upload::{start_upload, InnerUploader}, - GetStoreCostStrategy, TaskResult, UploadItem, UploaderInterface, - }, - ClientRegister, UploadEvent, -}; -use crate::{Client, Result as ClientResult, UploadSummary}; -use assert_matches::assert_matches; -use bls::SecretKey; -use eyre::Result; -use libp2p::PeerId; -use libp2p_identity::Keypair; -use rand::thread_rng; -use sn_networking::{NetworkBuilder, PayeeQuote}; -use sn_protocol::{storage::RetryStrategy, NetworkAddress}; -use sn_registers::{Register, RegisterAddress}; -use sn_transfers::{MainSecretKey, NanoTokens, PaymentQuote, WalletApi}; -use std::{ - collections::{BTreeMap, VecDeque}, - path::PathBuf, - sync::Arc, -}; -use tokio::{runtime::Handle, sync::mpsc, task::JoinHandle}; -use xor_name::XorName; - -struct TestUploader { - inner: Option, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, - - // test states - make_payment_collector: Vec<(XorName, Box)>, - payments_made_per_xorname: BTreeMap, - batch_size: usize, -} - -impl UploaderInterface for TestUploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner.take().unwrap() - } - - fn submit_get_register_task( - &mut self, - _client: Client, - reg_addr: RegisterAddress, - _task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_register called for: {xorname:?}. 
Step to execute: {step:?}"); - info!("TEST: spawn_get_register called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::GetRegisterOk => { - handle.spawn(async move { - let reg = Register::test_new_from_address(reg_addr); - - task_result_sender - .send(TaskResult::GetRegisterFromNetworkOk { - remote_register: reg, - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetRegisterFromNetworkErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetRegister step. Got: {con:?}"), - } - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - _verify_store: bool, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a PushRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::PushRegisterOk => { - handle.spawn(async move { - let updated_register = match upload_item { - UploadItem::Register { reg, .. } => reg, - _ => panic!("Expected UploadItem::Register"), - }; - task_result_sender - .send(TaskResult::PushRegisterOk { - // this register is just used for returning. - updated_register, - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::PushRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::PushRegisterErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected PushRegister step. 
Got: {con:?}"), - } - } - - fn submit_get_store_cost_task( - &mut self, - _client: Client, - _wallet_api: WalletApi, - xorname: XorName, - _address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - _task_result_sender: mpsc::Sender, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetStoreCost step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - - let has_max_payments_reached_closure = - |get_store_cost_strategy: &GetStoreCostStrategy| -> bool { - match get_store_cost_strategy { - GetStoreCostStrategy::SelectDifferentPayee => { - if let Some(n_payments) = self.payments_made_per_xorname.get(&xorname) { - InnerUploader::have_we_reached_max_repayments( - *n_payments, - max_repayments_for_failed_data, - ) - } else { - false - } - } - _ => false, - } - }; - - // if select different payee, then it can possibly error out if max_repayments have been reached. - // then the step should've been a GetStoreCostErr. - if has_max_payments_reached_closure(&get_store_cost_strategy) { - assert_matches!(step, TestSteps::GetStoreCostErr { .. }, "Max repayments have been reached, so we expect a GetStoreCostErr, not GetStoreCostOk"); - } - - match step { - TestSteps::GetStoreCostOk { - trigger_zero_cost, - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. - assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. 
} => - assert_select_different_payee, - }); - - let mut quote = PaymentQuote::zero(); - if !trigger_zero_cost { - quote.cost = NanoTokens::from(10); - } - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(( - PeerId::random(), - MainSecretKey::random().main_pubkey(), - quote, - )), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetStoreCostErr { - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. - assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. } => - assert_select_different_payee, - }); - let max_repayments_reached = - has_max_payments_reached_closure(&get_store_cost_strategy); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetStoreCost step. Got: {con:?}"), - } - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - _make_payment_sender: mpsc::Sender)>>, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a MakePayment step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - match &to_send { - Some((upload_item, quote)) => { - let xorname = upload_item.xorname(); - println!("spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}"); - info!( - "TEST: spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}" - ); - - self.make_payment_collector - .push((upload_item.xorname(), quote.clone())); - } - None => { - println!( - "spawn_make_payment called with force make payment. 
Step to execute: {step:?}" - ); - info!("TEST: spawn_make_payment called with force make payment. Step to execute: {step:?}"); - } - } - - // gotta collect batch size before sending task result. - let _make_payment = self.make_payment_collector.len() >= self.batch_size - || (to_send.is_none() && !self.make_payment_collector.is_empty()); - - match step { - // TestSteps::MakePaymentJustCollectItem => { - // // The test expected for us to just collect item, but if the logic wants us to make payment, then it as - // // error - // assert!(!make_payment); - // } - TestSteps::MakePaymentOk => { - let paid_xornames = std::mem::take(&mut self.make_payment_collector) - .into_iter() - .map(|(xorname, _)| xorname) - .collect::>(); - // track the payments per xorname - for xorname in paid_xornames.iter() { - let entry = self.payments_made_per_xorname.entry(*xorname).or_insert(0); - *entry += 1; - } - let batch_size = self.batch_size; - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost: NanoTokens::from(batch_size as u64 * 10), - royalty_fees: NanoTokens::from(batch_size as u64 * 3), - new_balance: NanoTokens::from(batch_size as u64 * 1000), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::MakePaymentErr => { - let failed_xornames = std::mem::take(&mut self.make_payment_collector); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: None, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected MakePayment step. 
Got: {con:?}"), - } - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - _client: Client, - _wallet_api: WalletApi, - _verify_store: bool, - _retry_strategy: RetryStrategy, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a UploadItem step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::UploadItemOk => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadOk(xorname)) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::UploadItemErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected UploadItem step. Got: {con:?}"), - } - } -} - -#[derive(Debug, Clone)] -pub enum TestSteps { - GetRegisterOk, - GetRegisterErr, - PushRegisterOk, - PushRegisterErr, - GetStoreCostOk { - trigger_zero_cost: bool, - assert_select_different_payee: bool, - }, - GetStoreCostErr { - assert_select_different_payee: bool, - }, - // MakePaymentJustCollectItem, - MakePaymentOk, - MakePaymentErr, - UploadItemOk, - UploadItemErr, -} - -pub fn get_inner_uploader(root_dir: PathBuf) -> Result<(InnerUploader, mpsc::Sender)> { - let client = build_unconnected_client(root_dir.clone())?; - - let mut inner = InnerUploader::new(client, root_dir); - let (task_result_sender, task_result_receiver) = mpsc::channel(100); - inner.testing_task_channels = Some((task_result_sender.clone(), task_result_receiver)); - - Ok((inner, task_result_sender)) -} - -// Spawns two tasks. 
One is the actual upload task that will return an UploadStat when completed. -// The other is a one to collect all the UploadEvent emitted by the previous task. -pub fn start_uploading_with_steps( - mut inner_uploader: InnerUploader, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, -) -> ( - JoinHandle>, - JoinHandle>, -) { - let batch_size = inner_uploader.cfg.batch_size; - let mut upload_event_rx = inner_uploader.get_event_receiver(); - - let upload_handle = tokio::spawn(start_upload(Box::new(TestUploader { - inner: Some(inner_uploader), - test_steps, - task_result_sender, - make_payment_collector: Default::default(), - payments_made_per_xorname: Default::default(), - batch_size, - }))); - - let event_handle = tokio::spawn(async move { - let mut events = vec![]; - while let Some(event) = upload_event_rx.recv().await { - events.push(event); - } - events - }); - - (upload_handle, event_handle) -} - -// Collect all the upload events into a list - -// Build a very simple client struct for testing. This does not connect to any network. -// The UploaderInterface eliminates the need for direct networking in tests. -pub fn build_unconnected_client(root_dir: PathBuf) -> Result { - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), true, root_dir); - let (network, ..) = network_builder.build_client()?; - let client = Client { - network, - events_broadcaster: Default::default(), - signer: Arc::new(SecretKey::random()), - }; - Ok(client) -} - -// We don't perform any networking, so the paths can be dummy ones. 
-pub fn get_dummy_chunk_paths(num: usize, temp_dir: PathBuf) -> Vec<(XorName, PathBuf)> { - let mut rng = thread_rng(); - let mut chunks = Vec::with_capacity(num); - for _ in 0..num { - chunks.push((XorName::random(&mut rng), temp_dir.clone())); - } - chunks -} - -pub fn get_dummy_registers(num: usize, client: Client) -> Vec { - let mut rng = thread_rng(); - let mut registers = Vec::with_capacity(num); - for _ in 0..num { - let mut client_reg = ClientRegister::create(client.clone(), XorName::random(&mut rng)); - // test_new_from_address that is used during get_register, uses AnyoneCanWrite permission, so use the same here - client_reg.register = Register::test_new_from_address(*client_reg.address()); - - registers.push(client_reg); - } - registers -} diff --git a/sn_client/src/uploader/upload.rs b/sn_client/src/uploader/upload.rs deleted file mode 100644 index 0fdc4280de..0000000000 --- a/sn_client/src/uploader/upload.rs +++ /dev/null @@ -1,1099 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{ - GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary, - UploaderInterface, -}; -use crate::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, - transfers::{TransferError, WalletError}, - Client, ClientRegister, Error as ClientError, Result, Uploader, WalletClient, -}; -use bytes::Bytes; -use itertools::Either; -use libp2p::PeerId; -use sn_networking::PayeeQuote; -use sn_protocol::{ - messages::RegisterCmd, - storage::{Chunk, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{Register, RegisterAddress}; -use sn_transfers::{NanoTokens, WalletApi}; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - path::{Path, PathBuf}, -}; -use tiny_keccak::{Hasher, Sha3}; -use tokio::sync::mpsc; -use xor_name::XorName; - -/// The number of repayments to attempt for a failed item before returning an error. -/// If value = 1, we do an initial payment & 1 repayment. Thus we make a max 2 payments per data item. -#[cfg(not(test))] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 3; -#[cfg(test)] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1; - -/// The maximum number of sequential payment failures before aborting the upload process. -#[cfg(not(test))] -const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 3; -#[cfg(test)] -const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 1; - -/// The maximum number of sequential network failures before aborting the upload process. -// todo: use uploader.retry_strategy.get_count() instead. -#[cfg(not(test))] -const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 32; -#[cfg(test)] -const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 1; - -/// The number of upload failures for a single data item before -#[cfg(not(test))] -const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 3; -#[cfg(test)] -const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1; - -// TODO: -// 1. log whenever we insert/remove items. 
i.e., don't ignore values with `let _` - -/// The main loop that performs the upload process. -/// An interface is passed here for easy testing. -pub(super) async fn start_upload( - mut interface: Box, -) -> Result { - let mut uploader = interface.take_inner_uploader(); - // Take out the testing task senders if any. This is only set for tests. - let (task_result_sender, mut task_result_receiver) = - if let Some(channels) = uploader.testing_task_channels.take() { - channels - } else { - // 6 because of the 6 pipelines, 1 for redundancy. - mpsc::channel(uploader.cfg.batch_size * 6 + 1) - }; - let (make_payment_sender, make_payment_receiver) = mpsc::channel(uploader.cfg.batch_size); - - uploader.start_make_payment_processing_loop( - make_payment_receiver, - task_result_sender.clone(), - uploader.cfg.batch_size, - )?; - - // chunks can be pushed to pending_get_store_cost directly - uploader.pending_to_get_store_cost = uploader - .all_upload_items - .iter() - .filter_map(|(xorname, item)| { - if let UploadItem::Chunk { .. } = item { - Some((*xorname, GetStoreCostStrategy::Cheapest)) - } else { - None - } - }) - .collect(); - - // registers have to be verified + merged with remote replica, so we have to fetch it first. - uploader.pending_to_get_register = uploader - .all_upload_items - .iter() - .filter_map(|(_xorname, item)| { - if let UploadItem::Register { address, .. } = item { - Some(*address) - } else { - None - } - }) - .collect(); - - loop { - // Break if we have uploaded all the items. - // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times. - if uploader.all_upload_items.is_empty() { - debug!("Upload items are empty, exiting main upload loop."); - // To avoid empty final_balance when all items are skipped. - uploader.upload_final_balance = - InnerUploader::load_wallet_client(uploader.client.clone(), &uploader.root_dir)? 
- .balance(); - #[cfg(test)] - trace!("UPLOADER STATE: finished uploading all items {uploader:?}"); - let summary = UploadSummary { - storage_cost: uploader.upload_storage_cost, - royalty_fees: uploader.upload_royalty_fees, - final_balance: uploader.upload_final_balance, - uploaded_addresses: uploader.uploaded_addresses, - uploaded_count: uploader.uploaded_count, - skipped_count: uploader.skipped_count, - uploaded_registers: uploader.uploaded_registers, - }; - - if !uploader.max_repayments_reached.is_empty() { - error!( - "The maximum repayments were reached for these addresses: {:?}", - uploader.max_repayments_reached - ); - return Err(ClientError::UploadFailedWithMaximumRepaymentsReached { - items: uploader.max_repayments_reached.into_iter().collect(), - summary, - }); - } - - return Ok(summary); - } - - // try to GET register if we have enough buffer. - // The results of the get & push register steps are used to fill up `pending_to_get_store` cost - // Since the get store cost list is the init state, we don't have to check if it is not full. - while !uploader.pending_to_get_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - if let Some(reg_addr) = uploader.pending_to_get_register.pop() { - trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); - let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); - interface.submit_get_register_task( - uploader.client.clone(), - reg_addr, - task_result_sender.clone(), - ); - } - } - - // try to push register if we have enough buffer. - // No other checks for the same reason as the above step. 
- while !uploader.pending_to_push_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - let upload_item = uploader.pop_item_for_push_register()?; - trace!( - "Conditions met for push registers {:?}", - upload_item.xorname() - ); - let _ = uploader - .on_going_push_register - .insert(upload_item.xorname()); - interface.submit_push_register_task( - upload_item, - uploader.cfg.verify_store, - task_result_sender.clone(), - ); - } - - // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer. - while !uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.len() < uploader.cfg.batch_size - && uploader.pending_to_pay.len() < uploader.cfg.batch_size - { - let (xorname, address, get_store_cost_strategy) = - uploader.pop_item_for_get_store_cost()?; - trace!("Conditions met for get store cost. {xorname:?} {get_store_cost_strategy:?}",); - - let _ = uploader.on_going_get_cost.insert(xorname); - interface.submit_get_store_cost_task( - uploader.client.clone(), - uploader.wallet_api.clone(), - xorname, - address, - get_store_cost_strategy, - uploader.cfg.max_repayments_for_failed_data, - task_result_sender.clone(), - ); - } - - // try to make payment for an item if pending_to_upload needs items & if we have enough buffer. - while !uploader.pending_to_pay.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - && uploader.pending_to_upload.len() < uploader.cfg.batch_size - { - let (upload_item, quote) = uploader.pop_item_for_make_payment()?; - trace!( - "Conditions met for making payments. {:?} {quote:?}", - upload_item.xorname() - ); - let _ = uploader.on_going_payments.insert(upload_item.xorname()); - - interface - .submit_make_payment_task(Some((upload_item, quote)), make_payment_sender.clone()); - } - - // try to upload if we have enough buffer to upload. 
- while !uploader.pending_to_upload.is_empty() - && uploader.on_going_uploads.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: upload_item : {uploader:?}"); - let upload_item = uploader.pop_item_for_upload_item()?; - - trace!("Conditions met for uploading. {:?}", upload_item.xorname()); - let _ = uploader.on_going_uploads.insert(upload_item.xorname()); - interface.submit_upload_item_task( - upload_item, - uploader.client.clone(), - uploader.wallet_api.clone(), - uploader.cfg.verify_store, - uploader.cfg.retry_strategy, - task_result_sender.clone(), - ); - } - - // Fire None to trigger a forced round of making leftover payments, if there are not enough store cost tasks - // to fill up the buffer. - if uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.is_empty() - && !uploader.on_going_payments.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: make_payment (forced): {uploader:?}"); - - debug!("There are not enough on going payments to trigger a batch Payment and no get_store_costs to fill the batch. Triggering forced round of payment"); - interface.submit_make_payment_task(None, make_payment_sender.clone()); - } - - #[cfg(test)] - trace!("UPLOADER STATE: before await task result: {uploader:?}"); - - trace!("Fetching task result"); - let task_result = task_result_receiver - .recv() - .await - .ok_or(ClientError::InternalTaskChannelDropped)?; - trace!("Received task result: {task_result:?}"); - match task_result { - TaskResult::GetRegisterFromNetworkOk { remote_register } => { - // if we got back the register, then merge & PUT it. 
- let xorname = remote_register.address().xorname(); - trace!("TaskResult::GetRegisterFromNetworkOk for remote register: {xorname:?} \n{remote_register:?}"); - let _ = uploader.on_going_get_register.remove(&xorname); - - let reg = uploader - .all_upload_items - .get_mut(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - if let UploadItem::Register { reg, .. } = reg { - // todo: not error out here - reg.register.merge(&remote_register)?; - uploader.pending_to_push_register.push(xorname); - } - } - TaskResult::GetRegisterFromNetworkErr(xorname) => { - // then the register is a new one. It can follow the same flow as chunks now. - let _ = uploader.on_going_get_register.remove(&xorname); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::Cheapest)); - } - TaskResult::PushRegisterOk { updated_register } => { - // push modifies the register, so we return this instead of the one from all_upload_items - let xorname = updated_register.address().xorname(); - let _ = uploader.on_going_push_register.remove(&xorname); - uploader.skipped_count += 1; - let _ = uploader - .uploaded_addresses - .insert(NetworkAddress::from_register_address( - *updated_register.address(), - )); - - let _old_register = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*updated_register.address(), updated_register.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register)); - } - TaskResult::PushRegisterErr(xorname) => { - // the register failed to be Pushed. Retry until failure. 
- let _ = uploader.on_going_push_register.remove(&xorname); - uploader.pending_to_push_register.push(xorname); - - uploader.push_register_errors += 1; - if uploader.push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { - error!("Max sequential network failures reached during PushRegisterErr."); - return Err(ClientError::SequentialNetworkErrors); - } - } - TaskResult::GetStoreCostOk { xorname, quote } => { - let _ = uploader.on_going_get_cost.remove(&xorname); - uploader.get_store_cost_errors = 0; // reset error if Ok. We only throw error after 'n' sequential errors - - trace!("GetStoreCostOk for {xorname:?}'s store_cost {:?}", quote.2); - - if quote.2.cost != NanoTokens::zero() { - uploader.pending_to_pay.push((xorname, quote)); - } - // if cost is 0, then it already in the network. - else { - // remove the item since we have uploaded it. - let removed_item = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - let _ = uploader.uploaded_addresses.insert(removed_item.address()); - trace!("{xorname:?} has store cost of 0 and it already exists on the network"); - uploader.skipped_count += 1; - - // if during the first try we skip the item, then it is already present in the network. - match removed_item { - UploadItem::Chunk { address, .. } => { - uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork( - address, - )); - } - - UploadItem::Register { reg, .. 
} => { - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*reg.address(), reg.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUpdated(reg)); - } - } - } - } - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } => { - let _ = uploader.on_going_get_cost.remove(&xorname); - trace!("GetStoreCostErr for {xorname:?} , get_store_cost_strategy: {get_store_cost_strategy:?}, max_repayments_reached: {max_repayments_reached:?}"); - - // If max repayments reached, track it separately. Else retry get_store_cost. - if max_repayments_reached { - error!("Max repayments reached for {xorname:?}. Skipping upload for it"); - uploader.max_repayments_reached.insert(xorname); - uploader.all_upload_items.remove(&xorname); - } else { - // use the same strategy. The repay different payee is set only if upload fails. - uploader - .pending_to_get_store_cost - .push((xorname, get_store_cost_strategy.clone())); - } - uploader.get_store_cost_errors += 1; - if uploader.get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { - error!("Max sequential network failures reached during GetStoreCostErr."); - return Err(ClientError::SequentialNetworkErrors); - } - } - TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost, - royalty_fees, - new_balance, - } => { - trace!("MakePaymentsOk for {} items: hash({:?}), with {storage_cost:?} store_cost and {royalty_fees:?} royalty_fees, and new_balance is {new_balance:?}", - paid_xornames.len(), InnerUploader::hash_of_xornames(paid_xornames.iter())); - for xorname in paid_xornames.iter() { - let _ = uploader.on_going_payments.remove(xorname); - } - uploader.pending_to_upload.extend(paid_xornames); - uploader.make_payments_errors = 0; - uploader.upload_final_balance = new_balance; - uploader.upload_storage_cost = uploader - .upload_storage_cost - .checked_add(storage_cost) - .ok_or(ClientError::TotalPriceTooHigh)?; - uploader.upload_royalty_fees = uploader - 
.upload_royalty_fees - .checked_add(royalty_fees) - .ok_or(ClientError::TotalPriceTooHigh)?; - - // reset sequential payment fail error if ok. We throw error if payment fails continuously more than - // MAX_SEQUENTIAL_PAYMENT_FAILS errors. - uploader.emit_upload_event(UploadEvent::PaymentMade { - storage_cost, - royalty_fees, - new_balance, - }); - } - TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance, - } => { - trace!( - "MakePaymentsErr for {:?} items: hash({:?})", - failed_xornames.len(), - InnerUploader::hash_of_xornames(failed_xornames.iter().map(|(name, _)| name)) - ); - if let Some((available, required)) = insufficient_balance { - error!("Wallet does not have enough funds. This error is not recoverable"); - return Err(ClientError::Wallet(WalletError::Transfer( - TransferError::NotEnoughBalance(available, required), - ))); - } - - for (xorname, quote) in failed_xornames { - let _ = uploader.on_going_payments.remove(&xorname); - uploader.pending_to_pay.push((xorname, quote)); - } - uploader.make_payments_errors += 1; - - if uploader.make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS { - error!("Max sequential upload failures reached during MakePaymentsErr."); - // Too many sequential overall payment failure indicating - // unrecoverable failure of spend tx continuously rejected by network. - // The entire upload process shall be terminated. - return Err(ClientError::SequentialUploadPaymentError); - } - } - TaskResult::UploadOk(xorname) => { - let _ = uploader.on_going_uploads.remove(&xorname); - uploader.uploaded_count += 1; - trace!("UploadOk for {xorname:?}"); - // remove the item since we have uploaded it. - let removed_item = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - let _ = uploader.uploaded_addresses.insert(removed_item.address()); - - match removed_item { - UploadItem::Chunk { address, .. 
} => { - uploader.emit_upload_event(UploadEvent::ChunkUploaded(address)); - } - UploadItem::Register { reg, .. } => { - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*reg.address(), reg.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUploaded(reg)); - } - } - } - TaskResult::UploadErr { xorname } => { - let _ = uploader.on_going_uploads.remove(&xorname); - trace!("UploadErr for {xorname:?}"); - - // keep track of the failure - let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0); - *n_errors += 1; - - // if quote has expired, don't retry the upload again. Instead get the cheapest quote again. - if *n_errors > UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE { - // if error > threshold, then select different payee. else retry again - // Also reset n_errors as we want to enable retries for the new payee. - *n_errors = 0; - debug!("Max error during upload reached for {xorname:?}. Selecting a different payee."); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::SelectDifferentPayee)); - } else { - uploader.pending_to_upload.push(xorname); - } - } - } - } -} - -impl UploaderInterface for Uploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner - .take() - .expect("Uploader::new makes sure inner is present") - } - - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning get_store_cost for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_store_cost( - client, - wallet_api, - xorname, - address, - get_store_cost_strategy.clone(), - max_repayments_for_failed_data, - ) - .await - { - Ok(quote) => { - debug!("StoreCosts retrieved for {xorname:?} quote: {quote:?}"); - 
TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(quote), - } - } - Err(err) => { - error!("Encountered error {err:?} when getting store_cost for {xorname:?}",); - - let max_repayments_reached = - matches!(&err, ClientError::MaximumRepaymentsReached(_)); - - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } - } - }; - - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - trace!("Spawning get_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_register(client, reg_addr).await { - Ok(register) => { - debug!("Register retrieved for {xorname:?}"); - TaskResult::GetRegisterFromNetworkOk { - remote_register: register, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - warn!("Encountered error {err:?} during get_register. The register has to be PUT as it is a new one."); - TaskResult::GetRegisterFromNetworkErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - trace!("Spawning push_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::push_register(upload_item, verify_store).await { - Ok(reg) => { - debug!("Register pushed: {xorname:?}"); - TaskResult::PushRegisterOk { - updated_register: reg, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - error!("Encountered error {err:?} during push_register. 
The register might not be present in the network"); - TaskResult::PushRegisterErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ) { - let _handle = tokio::spawn(async move { - let _ = make_payment_sender.send(to_send).await; - }); - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning upload item task for {:?}", upload_item.xorname()); - - let _handle = tokio::spawn(async move { - let xorname = upload_item.xorname(); - let result = InnerUploader::upload_item( - client, - wallet_api, - upload_item, - verify_store, - retry_strategy, - ) - .await; - - trace!("Upload item {xorname:?} uploaded with result {result:?}"); - match result { - Ok(_) => { - let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; - } - Err(_) => { - let _ = task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await; - } - }; - }); - } -} - -/// `Uploader` provides functionality for uploading both Chunks and Registers with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). 
-#[derive(custom_debug::Debug)] -pub(super) struct InnerUploader { - pub(super) cfg: UploadCfg, - #[debug(skip)] - pub(super) client: Client, - #[debug(skip)] - pub(super) wallet_api: WalletApi, - pub(super) root_dir: PathBuf, - - // states - pub(super) all_upload_items: HashMap, - pub(super) pending_to_get_register: Vec, - pub(super) pending_to_push_register: Vec, - pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>, - pub(super) pending_to_pay: Vec<(XorName, Box)>, - pub(super) pending_to_upload: Vec, - - // trackers - pub(super) on_going_get_register: BTreeSet, - pub(super) on_going_push_register: BTreeSet, - pub(super) on_going_get_cost: BTreeSet, - pub(super) on_going_payments: BTreeSet, - pub(super) on_going_uploads: BTreeSet, - - // error trackers - pub(super) n_errors_during_uploads: BTreeMap, - pub(super) push_register_errors: usize, - pub(super) get_store_cost_errors: usize, - pub(super) make_payments_errors: usize, - - // Upload summary - pub(super) upload_storage_cost: NanoTokens, - pub(super) upload_royalty_fees: NanoTokens, - pub(super) upload_final_balance: NanoTokens, - pub(super) max_repayments_reached: BTreeSet, - pub(super) uploaded_addresses: BTreeSet, - pub(super) uploaded_registers: BTreeMap, - pub(super) uploaded_count: usize, - pub(super) skipped_count: usize, - - // Task channels for testing. Not used in actual code. 
- pub(super) testing_task_channels: - Option<(mpsc::Sender, mpsc::Receiver)>, - - // Public events events - #[debug(skip)] - pub(super) logged_event_sender_absence: bool, - #[debug(skip)] - pub(super) event_sender: Option>, -} - -impl InnerUploader { - pub(super) fn new(client: Client, root_dir: PathBuf) -> Self { - Self { - cfg: Default::default(), - client, - wallet_api: WalletApi::new_from_root_dir(&root_dir), - root_dir, - - all_upload_items: Default::default(), - pending_to_get_register: Default::default(), - pending_to_push_register: Default::default(), - pending_to_get_store_cost: Default::default(), - pending_to_pay: Default::default(), - pending_to_upload: Default::default(), - - on_going_get_register: Default::default(), - on_going_push_register: Default::default(), - on_going_get_cost: Default::default(), - on_going_payments: Default::default(), - on_going_uploads: Default::default(), - - n_errors_during_uploads: Default::default(), - push_register_errors: Default::default(), - get_store_cost_errors: Default::default(), - max_repayments_reached: Default::default(), - make_payments_errors: Default::default(), - - upload_storage_cost: NanoTokens::zero(), - upload_royalty_fees: NanoTokens::zero(), - upload_final_balance: NanoTokens::zero(), - uploaded_addresses: Default::default(), - uploaded_registers: Default::default(), - uploaded_count: Default::default(), - skipped_count: Default::default(), - - testing_task_channels: None, - logged_event_sender_absence: Default::default(), - event_sender: Default::default(), - } - } - - // ====== Pop items ====== - - fn pop_item_for_push_register(&mut self) -> Result { - if let Some(name) = self.pending_to_push_register.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok(upload_item) - } else { - // the caller will be making sure this does not happen. 
- Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - fn pop_item_for_get_store_cost( - &mut self, - ) -> Result<(XorName, NetworkAddress, GetStoreCostStrategy)> { - let (xorname, strategy) = self - .pending_to_get_store_cost - .pop() - .ok_or(ClientError::UploadStateTrackerIsEmpty)?; - let address = self - .all_upload_items - .get(&xorname) - .map(|item| item.address()) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - Ok((xorname, address, strategy)) - } - - fn pop_item_for_make_payment(&mut self) -> Result<(UploadItem, Box)> { - if let Some((name, quote)) = self.pending_to_pay.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok((upload_item, quote)) - } else { - // the caller will be making sure this does not happen. - Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - fn pop_item_for_upload_item(&mut self) -> Result { - if let Some(name) = self.pending_to_upload.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok(upload_item) - } else { - // the caller will be making sure this does not happen. - Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - // ====== Processing Loop ====== - - // This is spawned as a long running task to prevent us from reading the wallet files - // each time we have to make a payment. 
- fn start_make_payment_processing_loop( - &self, - mut make_payment_receiver: mpsc::Receiver)>>, - task_result_sender: mpsc::Sender, - batch_size: usize, - ) -> Result<()> { - let mut wallet_client = Self::load_wallet_client(self.client.clone(), &self.root_dir)?; - - let verify_store = self.cfg.verify_store; - let _handle = tokio::spawn(async move { - debug!("Spawning the long running make payment processing loop."); - - let mut cost_map = BTreeMap::new(); - let mut current_batch = vec![]; - - let mut got_a_previous_force_payment = false; - while let Some(payment) = make_payment_receiver.recv().await { - let make_payments = if let Some((item, quote)) = payment { - let xorname = item.xorname(); - trace!("Inserted {xorname:?} into cost_map"); - - current_batch.push((xorname, quote.clone())); - let _ = cost_map.insert(xorname, (quote.1, quote.2, quote.0.to_bytes())); - cost_map.len() >= batch_size || got_a_previous_force_payment - } else { - // using None to indicate as all paid. - let make_payments = !cost_map.is_empty(); - trace!("Got a forced forced round of make payment."); - // Note: There can be a mismatch of ordering between the main loop and the make payment loop because - // the instructions are sent via a task(channel.send().await). And there is no guarantee for the - // order to come in the same order as they were sent. - // - // We cannot just disobey the instruction inside the child loop, as the mainloop would be expecting - // a result back for a particular instruction. - if !make_payments { - got_a_previous_force_payment = true; - warn!( - "We were told to force make payment, but cost_map is empty, so we can't do that just yet. 
Waiting for a task to insert a quote into cost_map" - ) - } - - make_payments - }; - - if make_payments { - // reset force_make_payment - if got_a_previous_force_payment { - info!("A task inserted a quote into cost_map, so we can now make a forced round of payment!"); - got_a_previous_force_payment = false; - } - - let _ = wallet_client - .resend_pending_transaction_blocking_loop() - .await; - - let mut terminate_process = false; - - let result = match wallet_client.pay_for_records(&cost_map, verify_store).await - { - Ok((storage_cost, royalty_fees)) => { - let paid_xornames = std::mem::take(&mut current_batch); - let paid_xornames = paid_xornames - .into_iter() - .map(|(xorname, _)| xorname) - .collect::>(); - trace!( - "Made payments for {} records: hash({:?})", - cost_map.len(), - Self::hash_of_xornames(paid_xornames.iter()) - ); - TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost, - royalty_fees, - new_balance: wallet_client.balance(), - } - } - Err(err) => { - let failed_xornames = std::mem::take(&mut current_batch); - error!( - "When paying {} data: hash({:?}) got error {err:?}", - failed_xornames.len(), - Self::hash_of_xornames( - failed_xornames.iter().map(|(name, _)| name) - ) - ); - match err { - WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - )) => { - terminate_process = true; - TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: Some((available, required)), - } - } - _ => TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: None, - }, - } - } - }; - let pay_for_chunk_sender_clone = task_result_sender.clone(); - let _handle = tokio::spawn(async move { - let _ = pay_for_chunk_sender_clone.send(result).await; - }); - - cost_map = BTreeMap::new(); - - if terminate_process { - // The error will trigger the entire upload process to be terminated. - // Hence here we shall terminate the inner loop first, - // to avoid the wallet going furhter to be potentially got corrupted. 
- warn!( - "Terminating make payment processing loop due to un-recoverable error." - ); - break; - } - } - } - debug!("Make payment processing loop terminated."); - }); - Ok(()) - } - - // ====== Logic ====== - - async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result { - let reg = client.verify_register_stored(reg_addr).await?; - let reg = reg.register()?; - Ok(reg) - } - - async fn push_register(upload_item: UploadItem, verify_store: bool) -> Result { - let mut reg = if let UploadItem::Register { reg, .. } = upload_item { - reg - } else { - return Err(ClientError::InvalidUploadItemFound); - }; - reg.push(verify_store).await?; - Ok(reg) - } - - async fn get_store_cost( - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - ) -> Result { - let filter_list = match get_store_cost_strategy { - GetStoreCostStrategy::Cheapest => vec![], - GetStoreCostStrategy::SelectDifferentPayee => { - // Check if we have already made payment for the provided xorname. If so filter out those payee - let filter_list = wallet_api - .get_all_payments(&xorname)? - .into_iter() - .map(|details| { - PeerId::from_bytes(&details.peer_id_bytes).map_err(|_| { - ClientError::Wallet(WalletError::NoPaymentForAddress(xorname)) - }) - }) - .collect::>>()?; - - // if we have already made initial + max_repayments, then we should error out. - if Self::have_we_reached_max_repayments( - filter_list.len(), - max_repayments_for_failed_data, - ) { - // error is used by the caller. 
- return Err(ClientError::MaximumRepaymentsReached(xorname)); - } - - debug!("Filtering out payments from {filter_list:?} during get_store_cost for {xorname:?}"); - filter_list - } - }; - let quote = client - .network - .get_store_costs_from_network(address, filter_list) - .await?; - Ok(quote) - } - - async fn upload_item( - client: Client, - wallet_api: WalletApi, - upload_item: UploadItem, - verify_store: bool, - retry_strategy: RetryStrategy, - ) -> Result<()> { - let xorname = upload_item.xorname(); - - let payment_details = wallet_api.get_recent_payment(&xorname)?; - let payment = payment_details.to_payment(); - let payee = PeerId::from_bytes(&payment_details.peer_id_bytes) - .map_err(|_| ClientError::Wallet(WalletError::NoPaymentForAddress(xorname)))?; - - debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment:?}"); - - match upload_item { - UploadItem::Chunk { address: _, chunk } => { - let chunk = match chunk { - Either::Left(chunk) => chunk, - Either::Right(path) => { - let bytes = std::fs::read(path)?; - Chunk::new(Bytes::from(bytes)) - } - }; - - trace!("Client upload started for chunk: {xorname:?}"); - client - .store_chunk(chunk, payee, payment, verify_store, Some(retry_strategy)) - .await?; - trace!("Client upload completed for chunk: {xorname:?}"); - } - UploadItem::Register { address: _, reg } => { - let signature = client.sign(reg.register.bytes()?); - trace!("Client upload started for register: {xorname:?}"); - - ClientRegister::publish_register( - client, - RegisterCmd::Create { - register: reg.register, - signature, - }, - Some((payment, payee)), - verify_store, - ) - .await?; - trace!("Client upload completed for register: {xorname:?}"); - } - } - // remove the payment if the upload is successful. 
- wallet_api.remove_payment_transaction(&xorname); - - Ok(()) - } - - // ====== Misc ====== - - fn emit_upload_event(&mut self, event: UploadEvent) { - if let Some(sender) = self.event_sender.as_ref() { - let sender_clone = sender.clone(); - let _handle = tokio::spawn(async move { - if let Err(err) = sender_clone.send(event).await { - error!("Error emitting upload event: {err:?}"); - } - }); - } else if !self.logged_event_sender_absence { - info!("FilesUpload upload event sender is not set. Use get_upload_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - } - - /// If we have already made initial + max_repayments_allowed, then we should error out. - // separate function as it is used in test. - pub(super) fn have_we_reached_max_repayments( - payments_made: usize, - max_repayments_allowed: usize, - ) -> bool { - // if max_repayments_allowed = 1, then we have reached capacity = true if 2 payments have been made. i.e., - // i.e., 1 initial + 1 repayment. - payments_made > max_repayments_allowed - } - - /// Create a new WalletClient for a given root directory. - fn load_wallet_client(client: Client, root_dir: &Path) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - Ok(WalletClient::new(client, wallet)) - } - - // Used to debug a list of xornames. - fn hash_of_xornames<'a>(xornames: impl Iterator) -> String { - let mut output = [0; 32]; - let mut hasher = Sha3::v256(); - for xorname in xornames { - hasher.update(xorname); - } - hasher.finalize(&mut output); - - hex::encode(output) - } -} diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs deleted file mode 100644 index 93c6439b3a..0000000000 --- a/sn_client/src/wallet.rs +++ /dev/null @@ -1,1157 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::Error; - -use super::{error::Result, Client}; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::{future::join_all, TryFutureExt}; -use libp2p::PeerId; -use sn_networking::target_arch::Instant; -use sn_networking::{GetRecordError, PayeeQuote}; -use sn_protocol::NetworkAddress; -use sn_transfers::{ - CashNote, HotWallet, MainPubkey, NanoTokens, Payment, PaymentQuote, SignedSpend, SpendAddress, - Transfer, WalletError, WalletResult, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - iter::Iterator, -}; -use tokio::{ - task::JoinSet, - time::{sleep, Duration}, -}; -use xor_name::XorName; - -const MAX_RESEND_PENDING_TX_ATTEMPTS: usize = 10; - -/// A wallet client can be used to send and receive tokens to and from other wallets. -pub struct WalletClient { - client: Client, - wallet: HotWallet, -} - -/// The result of the payment made for a set of Content Addresses -pub struct StoragePaymentResult { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub skipped_chunks: Vec, -} - -impl WalletClient { - /// Create a new wallet client. 
- /// - /// # Arguments - /// * `client` - A instance of the struct [`sn_client::Client`](Client) - /// * `wallet` - An instance of the struct [`HotWallet`] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// # Ok(()) - /// # } - /// ``` - pub fn new(client: Client, wallet: HotWallet) -> Self { - Self { client, wallet } - } - - /// Stores the wallet to the local wallet directory. - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// wallet_client.store_local_wallet()?; - /// # Ok(()) - /// # } - pub fn store_local_wallet(&mut self) -> WalletResult<()> { - self.wallet.deposit_and_store_to_disk(&vec![]) - } - - /// Display the wallet balance - /// # Example - /// ```no_run - /// // Display the wallet balance in the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn 
main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// println!("{}" ,wallet_client.balance()); - /// # Ok(()) - /// # } - pub fn balance(&self) -> NanoTokens { - self.wallet.balance() - } - - /// See if any unconfirmed transactions exist. - /// # Example - /// ```no_run - /// // Print unconfirmed spends to the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// if wallet_client.unconfirmed_spend_requests_exist() {println!("Unconfirmed spends exist!")}; - /// # Ok(()) - /// # } - pub fn unconfirmed_spend_requests_exist(&self) -> bool { - self.wallet.unconfirmed_spend_requests_exist() - } - - /// Returns the most recent cached Payment for a provided NetworkAddress. This function does not check if the - /// quote has expired or not. Use get_non_expired_payment_for_addr if you want to get a non expired one. - /// - /// If multiple payments have been made to the same address, then we pick the last one as it is the most recent. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.get_recent_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_recent_payment_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult<(Payment, PeerId)> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_detail = self.wallet.api().get_recent_payment(&xorname)?; - - let payment = payment_detail.to_payment(); - trace!("Payment retrieved for {xorname:?} from wallet: {payment:?}"); - let peer_id = PeerId::from_bytes(&payment_detail.peer_id_bytes) - .map_err(|_| WalletError::NoPaymentForAddress(xorname))?; - - Ok((payment, peer_id)) - } - - /// Returns the all cached Payment for a provided NetworkAddress. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payments = wallet_client.get_all_payments_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_all_payments_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_details = self.wallet.api().get_all_payments(&xorname)?; - - let payments = payment_details - .into_iter() - .map(|details| { - let payment = details.to_payment(); - - match PeerId::from_bytes(&details.peer_id_bytes) { - Ok(peer_id) => Ok((payment, peer_id)), - Err(_) => Err(WalletError::NoPaymentForAddress(xorname)), - } - }) - .collect::>>()?; - - trace!( - "{} Payment retrieved for {xorname:?} from wallet: {payments:?}", - payments.len() - ); - - Ok(payments) - } - - /// Remove the payment for a given network address from disk. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Removing a payment address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.remove_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn remove_payment_for_addr(&self, address: &NetworkAddress) -> WalletResult<()> { - match &address.as_xorname() { - Some(xorname) => { - self.wallet.api().remove_payment_transaction(xorname); - Ok(()) - } - None => Err(WalletError::InvalidAddressType), - } - } - - /// Send tokens to another wallet. Can also verify the store has been successful. - /// Verification will be attempted via GET request through a Spend on the network. - /// - /// # Arguments - /// * `amount` - [`NanoTokens`]. - /// * `to` - [`MainPubkey`]. - /// * `verify_store` - A boolean to verify store. Set this to true for mandatory verification. 
- /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use sn_transfers::NanoTokens; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let nano = NanoTokens::from(10); - /// let main_pub_key = MainSecretKey::random().main_pubkey(); - /// let payment = wallet_client.send_cash_note(nano,main_pub_key, true); - /// # Ok(()) - /// # } - /// ``` - pub async fn send_cash_note( - &mut self, - amount: NanoTokens, - to: MainPubkey, - verify_store: bool, - ) -> WalletResult { - let created_cash_notes = self.wallet.local_send(vec![(amount, to)], None)?; - - // send to network - if let Err(error) = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - { - return Err(WalletError::CouldNotSendMoney(format!( - "The transfer was not successfully registered in the network: {error:?}" - ))); - } else { - // clear unconfirmed txs - self.wallet.clear_confirmed_spend_requests(); - } - - // return the first CashNote (assuming there is only one because we only sent to one recipient) - match &created_cash_notes[..] { - [cashnote] => Ok(cashnote.clone()), - [_multiple, ..] => Err(WalletError::CouldNotSendMoney( - "Multiple CashNotes were returned from the transaction when only one was expected. This is a BUG." 
- .into(), - )), - [] => Err(WalletError::CouldNotSendMoney( - "No CashNotes were returned from the wallet.".into(), - )), - } - } - - /// Get storecost from the network - /// Returns the MainPubkey of the node to pay and the price in NanoTokens - /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use libp2p_identity::PeerId; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// # let mut rng = rand::thread_rng(); - /// # let xor_name = XorName::random(&mut rng); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let mut wallet_client = WalletClient::new(client, wallet); - /// // Use get_store_cost_at_address(network_address) to get a storecost from the network. - /// let cost = wallet_client.get_store_cost_at_address(network_address).await?.2.cost.as_nano(); - /// # Ok(()) - /// # } - pub async fn get_store_cost_at_address( - &self, - address: NetworkAddress, - ) -> WalletResult { - self.client - .network - .get_store_costs_from_network(address, vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())) - } - - /// Send tokens to nodes closest to the data we want to make storage payment for. Runs mandatory verification. 
- /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let mut rng = rand::thread_rng(); - /// let xor_name = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xor_name, client.signer_pk()); - /// let net_addr = NetworkAddress::from_register_address(address); - /// - /// // Paying for a random Register Address - /// let cost = wallet_client.pay_for_storage(std::iter::once(net_addr)).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_storage( - &mut self, - content_addrs: impl Iterator, - ) -> WalletResult { - let verify_store = true; - let c: Vec<_> = content_addrs.collect(); - // Using default ExponentialBackoff doesn't make sense, - // as it will just fail after the first payment failure. 
- let mut backoff = ExponentialBackoff::default(); - let mut last_err = "No retries".to_string(); - - while let Some(delay) = backoff.next_backoff() { - trace!("Paying for storage (w/backoff retries) for: {:?}", c); - match self - .pay_for_storage_once(c.clone().into_iter(), verify_store) - .await - { - Ok(payment_result) => return Ok(payment_result), - Err(WalletError::CouldNotSendMoney(err)) => { - warn!("Attempt to pay for data failed: {err:?}"); - last_err = err; - sleep(delay).await; - } - Err(err) => return Err(err), - } - } - Err(WalletError::CouldNotSendMoney(last_err)) - } - - /// Existing chunks will have the store cost set to Zero. - /// The payment procedure shall be skipped, and the chunk upload as well. - /// Hence the list of existing chunks will be returned. - async fn pay_for_storage_once( - &mut self, - content_addrs: impl Iterator, - verify_store: bool, - ) -> WalletResult { - // get store cost from network in parallel - let mut tasks = JoinSet::new(); - for content_addr in content_addrs { - let client = self.client.clone(); - tasks.spawn(async move { - let cost = client - .network - .get_store_costs_from_network(content_addr.clone(), vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())); - - debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - (content_addr, cost) - }); - } - debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if let Some(xorname) = content_addr.as_xorname() { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(xorname); - debug!("Skipped existing chunk {content_addr:?}"); - } else { - debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(xorname, (cost.1, cost.2, cost.0.to_bytes())); - } - } else { - warn!("Cannot get store cost 
for a content that is not a data type: {content_addr:?}"); - } - } - Ok((content_addr, Err(err))) => { - warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(WalletError::CouldNotSendMoney(format!( - "Storecost get task failed: {e:?}" - ))); - } - } - } - info!("Storecosts retrieved for all the provided content addrs"); - - // pay for records - let (storage_cost, royalty_fees) = self.pay_for_records(&cost_map, verify_store).await?; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) - } - - /// Send tokens to nodes closest to the data that we want to make storage payments for. - /// # Returns: - /// - /// * [WalletResult]<([NanoTokens], [NanoTokens])> - /// - /// This return contains the amount paid for storage. Including the network royalties fee paid. - /// - /// # Params: - /// * cost_map - [BTreeMap]([XorName],([MainPubkey], [PaymentQuote])) - /// * verify_store - This optional check can verify if the store has been successful. - /// - /// Verification will be attempted via GET request through a Spend on the network. 
- /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeMap; - /// use xor_name::XorName; - /// use sn_transfers::{MainPubkey, Payment, PaymentQuote}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let mut cost_map:BTreeMap)> = BTreeMap::new(); - /// wallet_client.pay_for_records(&cost_map,true).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - verify_store: bool, - ) -> WalletResult<(NanoTokens, NanoTokens)> { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transaction_until_success(verify_store) - .await?; - let start = Instant::now(); - let total_cost = self.wallet.local_send_storage_payment(cost_map)?; - - trace!( - "local_send_storage_payment of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // send to network - trace!("Sending storage payment transfer to the network"); - let start = Instant::now(); - let spend_attempt_result = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await; - - trace!( - "send_spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. 
- let start = Instant::now(); - if let Err(error) = spend_attempt_result { - warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later."); - - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let WalletError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - warn!("Removing double spends CashNote from wallet: {cash_note_key:?}"); - self.wallet.mark_notes_as_spent([cash_note_key]); - self.wallet.clear_specific_spend_request(*cash_note_key); - } - } - - self.wallet.store_unconfirmed_spend_requests()?; - - return Err(WalletError::CouldNotSendMoney(format!( - "The storage payment transfer was not successfully registered in the network: {error:?}" - ))); - } else { - info!("Spend has completed: {:?}", spend_attempt_result); - self.wallet.clear_confirmed_spend_requests(); - } - trace!( - "clear up spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - Ok(total_cost) - } - - /// Resend failed transactions. This can optionally verify the store has been successful. - /// This will attempt to GET the cash_note from the network. - async fn resend_pending_transactions(&mut self, verify_store: bool) { - if self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - .is_ok() - { - self.wallet.clear_confirmed_spend_requests(); - } - } - - /// Resend previous confirmed spend. - async fn resend_confirmed_spend(&mut self, spend_addr: &SpendAddress) { - if let Ok(Some(spend)) = self.wallet.get_confirmed_spend(*spend_addr) { - let spend_vec = vec![spend]; - let _ = self.client.send_spends(spend_vec.iter(), true).await; - } else { - warn!("Cann't find confirmed spend of {spend_addr:?}"); - println!("Cann't find confirmed spend of {spend_addr:?}"); - } - } - - /// This is a blocking loop in cas there is pending transaction. 
- /// It will keeps resending the unconfirmed spend infinitely but explictly. - /// Function will only return on success (all unconfirmed spend uploaded), - /// or user chose to manualy, but safely, terminate the procedure. - pub async fn resend_pending_transaction_blocking_loop(&mut self) -> WalletResult<()> { - if !self.wallet.unconfirmed_spend_requests_exist() { - return Ok(()); - } - // Wallet shall be all clear to progress forward. - while self.wallet.unconfirmed_spend_requests_exist() { - info!("Pre-Unconfirmed transactions dected, sending again after 30 seconds..."); - println!("Pre-Unconfirmed transactions exist, sending again after 30 seconds..."); - println!("It's safe to terminate the work, but do remember to retain the unconfirmed_spend file during wallet update."); - println!("Otherwise, you are in risk to make the wallet corrupted."); - // Longer wait as the network will already in heavy duty situation, - // hence try not to give it further burden with short intervaled re-puts. - sleep(Duration::from_secs(30)).await; - - // Before re-sending, take a peek of un-confirmed spends first - // Helping user having a better view of what's happening. 
- let spends_to_check: BTreeMap> = self - .wallet - .unconfirmed_spend_requests() - .iter() - .map(|s| { - info!( - "Unconfirmed spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - info!("====== descendants : {:?} ", s.spend.descendants); - info!("====== ancestors : {:?} ", s.spend.ancestors); - println!( - "Unconfirmed spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - println!("====== descendants : {:?} ", s.spend.descendants); - println!("====== ancestors : {:?} ", s.spend.ancestors); - - let parent_spends: BTreeSet<_> = s - .spend - .ancestors - .iter() - .map(SpendAddress::from_unique_pubkey) - .collect(); - (s.address(), parent_spends) - }) - .collect(); - let unconfirmed_spends_addrs: Vec<_> = spends_to_check.keys().copied().collect(); - - for addr in unconfirmed_spends_addrs { - match self.client.peek_a_spend(addr).await { - Ok(_) => { - info!("Unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - println!( - "Unconfirmed Spend {addr:?} is find at least one copy in the network !" - ); - } - Err(err) => { - info!( - "Unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - println!( - "Unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" 
- ); - // For those that still not even have one copy in network yet - // Check it's parent's status in network - if let Some(parent_spends) = spends_to_check.get(&addr) { - for parent_addr in parent_spends.iter() { - match self.client.peek_a_spend(*parent_addr).await { - Ok(s) => { - info!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - println!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - info!( - "Parent spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - info!("====== descendants : {:?} ", s.spend.descendants); - info!("====== ancestors : {:?} ", s.spend.ancestors); - println!( - "Parent spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - println!("====== descendants : {:?} ", s.spend.descendants); - println!("====== ancestors : {:?} ", s.spend.ancestors); - } - Err(err) => { - warn!( - "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - println!( - "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - // In theory, it shall be traversed back to re-send all ancestors. - // However, in practical, only track back one generation is enough. - self.resend_confirmed_spend(parent_addr).await; - } - } - } - } - } - } - } - - self.resend_pending_transactions(true).await; - } - info!("Wallet is now all cleared, OK to progress further."); - println!("Wallet is now all cleared, OK to progress further."); - eprintln!("WARNING: Closing the client now could corrupt the wallet !"); - Ok(()) - } - - /// Try resending failed transactions multiple times until it succeeds or until we reach max attempts. - async fn resend_pending_transaction_until_success( - &mut self, - verify_store: bool, - ) -> WalletResult<()> { - let mut did_error = false; - // Wallet shall be all clear to progress forward. 
- let mut attempts = 0; - while self.wallet.unconfirmed_spend_requests_exist() { - info!("Pre-Unconfirmed transactions exist, sending again after 1 second..."); - sleep(Duration::from_secs(1)).await; - self.resend_pending_transactions(verify_store).await; - - if attempts > MAX_RESEND_PENDING_TX_ATTEMPTS { - // save the error state, but break out of the loop so we can save - did_error = true; - break; - } - - attempts += 1; - } - - if did_error { - error!("Wallet has pre-unconfirmed transactions, can't progress further."); - Err(WalletError::UnconfirmedTxAfterRetries) - } else { - Ok(()) - } - } - - /// Returns the wallet: - /// - /// Return type: [HotWallet] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let paying_wallet = wallet_client.into_wallet(); - /// // Display the wallet balance in the terminal - /// println!("{}",paying_wallet.balance()); - /// # Ok(()) - /// # } - pub fn into_wallet(self) -> HotWallet { - self.wallet - } - - /// Returns a mutable wallet instance - /// - /// Return type: [HotWallet] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut 
wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let paying_wallet = wallet_client.mut_wallet(); - /// // Display the mutable wallet balance in the terminal - /// println!("{}",paying_wallet.balance()); - /// # Ok(()) - /// # } - pub fn mut_wallet(&mut self) -> &mut HotWallet { - &mut self.wallet - } -} - -impl Client { - /// Send spend requests to the network. - /// This can optionally verify the spends have been correctly stored before returning - /// - /// # Arguments - /// * spend_requests - [Iterator]<[SignedSpend]> - /// * verify_store - Boolean. Set to true for mandatory verification via a GET request through a Spend on the network. - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// // An example of sending storage payment transfers over the network with validation - /// client.send_spends(wallet.unconfirmed_spend_requests().iter(),true).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn send_spends( - &self, - spend_requests: impl Iterator, - verify_store: bool, - ) -> WalletResult<()> { - let mut tasks = Vec::new(); - - // send spends to the network in parralel - for spend_request in spend_requests { - trace!( - "sending spend request to the network: {:?}: {spend_request:#?}", - spend_request.unique_pubkey() - ); - - let the_task = async move { - let cash_note_key = spend_request.unique_pubkey(); - let result = self - .network_store_spend(spend_request.clone(), verify_store) - 
.await; - - (cash_note_key, result) - }; - tasks.push(the_task); - } - - // wait for all the tasks to complete and gather the errors - let mut errors = Vec::new(); - let mut double_spent_keys = BTreeSet::new(); - for (spend_key, spend_attempt_result) in join_all(tasks).await { - match spend_attempt_result { - Err(Error::Network(sn_networking::NetworkError::GetRecordError( - GetRecordError::RecordDoesNotMatch(_), - ))) - | Err(Error::Network(sn_networking::NetworkError::GetRecordError( - GetRecordError::SplitRecord { .. }, - ))) => { - warn!( - "Double spend detected while trying to spend: {:?}", - spend_key - ); - double_spent_keys.insert(*spend_key); - } - Err(e) => { - warn!("Spend request errored out when sent to the network {spend_key:?}: {e}"); - errors.push((spend_key, e)); - } - Ok(()) => { - trace!("Spend request was successfully sent to the network: {spend_key:?}"); - } - } - } - - // report errors accordingly - // double spend errors in priority as they should be dealt with by the wallet - if !double_spent_keys.is_empty() { - return Err(WalletError::DoubleSpendAttemptedForCashNotes( - double_spent_keys, - )); - } - if !errors.is_empty() { - let mut err_report = "Failed to send spend requests to the network:".to_string(); - for (spend_key, e) in &errors { - warn!("Failed to send spend request to the network: {spend_key:?}: {e}"); - err_report.push_str(&format!("{spend_key:?}: {e}")); - } - return Err(WalletError::CouldNotSendMoney(err_report)); - } - - Ok(()) - } - - /// Receive a Transfer, verify and redeem CashNotes from the Network. 
- /// - /// # Arguments - /// * transfer: &[Transfer] - Borrowed value for [Transfer] - /// * wallet: &[HotWallet] - Borrowed value for [HotWallet] - /// - /// # Return Value - /// * [WalletResult]<[Vec]<[CashNote]>> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::error; - /// use sn_transfers::Transfer; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let transfer = Transfer::from_hex("13abc").unwrap(); - /// // An example for using client.receive() for cashNotes - /// let cash_notes = match client.receive(&transfer, &wallet).await { - /// Ok(cash_notes) => cash_notes, - /// Err(err) => { - /// println!("Failed to verify and redeem transfer: {err:?}"); - /// error!("Failed to verify and redeem transfer: {err:?}"); - /// return Err(err.into()); - /// } - /// }; - /// # Ok(()) - /// - /// # } - /// ``` - pub async fn receive( - &self, - transfer: &Transfer, - wallet: &HotWallet, - ) -> WalletResult> { - let cashnotes = self - .network - .verify_and_unpack_transfer(transfer, wallet) - .map_err(|e| WalletError::CouldNotReceiveMoney(format!("{e:?}"))) - .await?; - let valuable_cashnotes = self.filter_out_already_spend_cash_notes(cashnotes).await?; - Ok(valuable_cashnotes) - } - - /// Check that the redeemed CashNotes are not already spent - async fn filter_out_already_spend_cash_notes( - &self, - mut cash_notes: Vec, - ) -> WalletResult> { - trace!("Validating CashNotes are not already spent"); - let mut tasks = JoinSet::new(); - for cn in &cash_notes { - let pk = cn.unique_pubkey(); - let addr = SpendAddress::from_unique_pubkey(&pk); - 
let self_clone = self.network.clone(); - let _ = tasks.spawn(async move { self_clone.get_spend(addr).await }); - } - while let Some(result) = tasks.join_next().await { - let res = result.map_err(|e| WalletError::FailedToGetSpend(format!("{e}")))?; - match res { - // if we get a RecordNotFound, it means the CashNote is not spent, which is good - Err(sn_networking::NetworkError::GetRecordError( - GetRecordError::RecordNotFound, - )) => (), - // if we get a spend, it means the CashNote is already spent - Ok(s) => { - warn!( - "CashNoteRedemption contains a CashNote that is already spent, skipping it: {:?}", - s.unique_pubkey() - ); - cash_notes.retain(|c| &c.unique_pubkey() != s.unique_pubkey()); - } - // report all other errors - Err(e) => return Err(WalletError::FailedToGetSpend(format!("{e}"))), - } - } - - if cash_notes.is_empty() { - return Err(WalletError::CouldNotVerifyTransfer( - "All the redeemed CashNotes are already spent".to_string(), - )); - } - - Ok(cash_notes) - } - - /// Verify that the spends referred to (in the CashNote) exist on the network. 
- /// - /// # Arguments - /// * cash_note - [CashNote] - /// - /// # Return value - /// [WalletResult] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::error; - /// use sn_transfers::Transfer; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let transfer = Transfer::from_hex("").unwrap(); - /// let cash_notes = client.receive(&transfer, &wallet).await?; - /// // Verification: - /// for cash_note in cash_notes { - /// println!("{:?}" , client.verify_cashnote(&cash_note).await.unwrap()); - /// } - /// # Ok(()) - /// - /// # } - /// ``` - pub async fn verify_cashnote(&self, cash_note: &CashNote) -> WalletResult<()> { - // We need to get all the spends in the cash_note from the network, - // and compare them to the spends in the cash_note, to know if the - // transfer is considered valid in the network. 
- let mut tasks = Vec::new(); - for spend in &cash_note.parent_spends { - let address = SpendAddress::from_unique_pubkey(spend.unique_pubkey()); - debug!( - "Getting spend for pubkey {:?} from network at {address:?}", - spend.unique_pubkey() - ); - tasks.push(self.get_spend_from_network(address)); - } - - let mut received_spends = std::collections::BTreeSet::new(); - for result in join_all(tasks).await { - let network_valid_spend = - result.map_err(|err| WalletError::CouldNotVerifyTransfer(err.to_string()))?; - let _ = received_spends.insert(network_valid_spend); - } - - // If all the spends in the cash_note are the same as the ones in the network, - // we have successfully verified that the cash_note is globally recognised and therefor valid. - if received_spends == cash_note.parent_spends { - return Ok(()); - } - Err(WalletError::CouldNotVerifyTransfer( - "The spends in network were not the same as the ones in the CashNote. The parents of this CashNote are probably double spends.".into(), - )) - } -} - -/// Use the client to send a CashNote from a local wallet to an address. -/// This marks the spent CashNote as spent in the Network -/// -/// # Arguments -/// * from - [HotWallet] -/// * amount - [NanoTokens] -/// * to - [MainPubkey] -/// * client - [Client] -/// * verify_store - Boolean. Set to true for mandatory verification via a GET request through a Spend on the network. 
-/// -/// # Example -/// ```no_run -/// use sn_client::{Client, WalletClient, Error}; -/// # use tempfile::TempDir; -/// use bls::SecretKey; -/// use sn_transfers::{HotWallet, MainSecretKey}; -/// # #[tokio::main] -/// # async fn main() -> Result<(),Error>{ -/// use tracing::error; -/// use sn_client::send; -/// use sn_transfers::Transfer; -/// let client = Client::new(SecretKey::random(), None, None, None).await?; -/// # let tmp_path = TempDir::new()?.path().to_owned(); -/// let mut first_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; -/// let mut second_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; -/// let tokens = send( -/// first_wallet, // From -/// second_wallet.balance(), // To -/// second_wallet.address(), // Amount -/// &client, // Client -/// true, // Verification -/// ).await?; -/// # Ok(()) -/// # } -/// ``` -pub async fn send( - from: HotWallet, - amount: NanoTokens, - to: MainPubkey, - client: &Client, - verify_store: bool, -) -> Result { - if amount.is_zero() { - return Err(Error::AmountIsZero); - } - - let mut wallet_client = WalletClient::new(client.clone(), from); - - if let Err(err) = wallet_client - .resend_pending_transaction_until_success(verify_store) - .await - { - println!("Wallet has pre-unconfirmed transactions, can't progress further."); - warn!("Wallet has pre-unconfirmed transactions, can't progress further."); - return Err(err.into()); - } - - let new_cash_note = wallet_client - .send_cash_note(amount, to, verify_store) - .await - .map_err(|err| { - error!("Could not send cash note, err: {err:?}"); - err - })?; - - wallet_client - .resend_pending_transaction_until_success(verify_store) - .await?; - - wallet_client - .into_wallet() - .deposit_and_store_to_disk(&vec![new_cash_note.clone()])?; - - Ok(new_cash_note) -} diff --git a/sn_client/tests/folders_api.rs b/sn_client/tests/folders_api.rs deleted file mode 100644 index 
8340c3ad32..0000000000 --- a/sn_client/tests/folders_api.rs +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -// All tests require a network running so Clients can be instantiated. - -use bls::SecretKey; -use eyre::Result; -use sn_client::test_utils::{ - get_funded_wallet, get_new_client, pay_for_storage, random_file_chunk, -}; -use sn_client::{FolderEntry, FoldersApi, Metadata}; -use sn_protocol::{storage::ChunkAddress, NetworkAddress}; -use sn_registers::{EntryHash, RegisterAddress}; -use xor_name::XorName; - -#[tokio::test] -async fn test_folder_basics() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let mut rng = rand::thread_rng(); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let address_subdir = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, Some(address))?; - - let file_chunk = random_file_chunk(); - - let (file_entry_hash, file_meta_xorname, file_metadata) = - folders_api.add_file("file.txt".into(), file_chunk.clone(), None)?; - assert_eq!( - file_metadata, - Metadata { - name: "file.txt".to_string(), - content: FolderEntry::File(file_chunk) - } - ); - - let (subdir_entry_hash, subdir_meta_xorname, subdir_metadata) = - folders_api.add_folder("subdir".into(), address_subdir, None)?; - 
assert_eq!( - subdir_metadata, - Metadata { - name: "subdir".to_string(), - content: FolderEntry::Folder(address_subdir) - } - ); - - assert_eq!(folders_api.address(), &address); - assert_eq!( - folders_api.as_net_addr(), - NetworkAddress::RegisterAddress(address) - ); - assert_eq!( - folders_api.meta_addrs_to_pay(), - vec![ - NetworkAddress::ChunkAddress(ChunkAddress::new(file_meta_xorname)), - NetworkAddress::ChunkAddress(ChunkAddress::new(subdir_meta_xorname)) - ] - .into_iter() - .collect() - ); - - assert!(folders_api.contains(&file_entry_hash)); - assert!(folders_api.contains(&subdir_entry_hash)); - assert!(!folders_api.contains(&EntryHash::default())); - - assert_eq!( - folders_api.find_by_name("file.txt"), - Some((&file_meta_xorname, &file_metadata)) - ); - assert_eq!( - folders_api.find_by_name("subdir"), - Some((&subdir_meta_xorname, &subdir_metadata)) - ); - assert!(folders_api.find_by_name("inexistent").is_none()); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file_entry_hash, (file_meta_xorname, file_metadata)), - (subdir_entry_hash, (subdir_meta_xorname, subdir_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_remove_replace_entries() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - let file2_chunk = random_file_chunk(); - let file3_chunk = random_file_chunk(); - let file4_chunk = random_file_chunk(); - - let (file1_entry_hash, _, _) = - folders_api.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - folders_api.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - assert_eq!(folders_api.entries().await?.len(), 2); - assert!(folders_api.contains(&file1_entry_hash)); - 
assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_some()); - assert!(folders_api.find_by_name("file2.txt").is_some()); - - // let's now test removing file1.txt - folders_api.remove_item(file1_entry_hash)?; - assert!(!folders_api.contains(&file1_entry_hash)); - assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - // now we test replacing file2.txt with file3.txt - let (file3_entry_hash, file3_meta_xorname, file3_metadata) = - folders_api.replace_file(file2_entry_hash, "file3.txt".into(), file3_chunk, None)?; - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![( - file3_entry_hash, - (file3_meta_xorname, file3_metadata.clone()) - ),] - .into_iter() - .collect() - ); - - // let's add file4.txt, and check that final state is correct - let (file4_entry_hash, file4_meta_xorname, file4_metadata) = - folders_api.add_file("file4.txt".into(), file4_chunk, None)?; - - assert!(!folders_api.contains(&file1_entry_hash)); - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.contains(&file4_entry_hash)); - - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) 
- ); - assert_eq!( - folders_api.find_by_name("file4.txt"), - Some((&file4_meta_xorname, &file4_metadata)) - ); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file3_entry_hash, (file3_meta_xorname, file3_metadata)), - (file4_entry_hash, (file4_meta_xorname, file4_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_retrieve() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_retrieve", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut folder = FoldersApi::new(client.clone(), wallet_dir, None)?; - let mut subfolder = FoldersApi::new(client.clone(), wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - - let (file1_entry_hash, file1_meta_xorname, file1_metadata) = - folder.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (subfolder_entry_hash, subfolder_meta_xorname, subfolder_metadata) = - folder.add_folder("subfolder".into(), *subfolder.address(), None)?; - - let file2_chunk = random_file_chunk(); - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - subfolder.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder.as_net_addr(), subfolder.as_net_addr()]; - addrs2pay.extend(folder.meta_addrs_to_pay()); - addrs2pay.extend(subfolder.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder.sync(Default::default()).await?; - subfolder.sync(Default::default()).await?; - - let mut retrieved_folder = - FoldersApi::retrieve(client.clone(), wallet_dir, *folder.address()).await?; - let mut retrieved_subfolder = - FoldersApi::retrieve(client, wallet_dir, *subfolder.address()).await?; - - assert_eq!(retrieved_folder.entries().await?.len(), 2); 
- assert!(retrieved_folder.contains(&file1_entry_hash)); - assert!(retrieved_folder.contains(&subfolder_entry_hash)); - assert_eq!( - retrieved_folder.find_by_name("file1.txt"), - Some((&file1_meta_xorname, &file1_metadata)) - ); - assert_eq!( - retrieved_folder.find_by_name("subfolder"), - Some((&subfolder_meta_xorname, &subfolder_metadata)) - ); - - assert_eq!(retrieved_subfolder.entries().await?.len(), 1); - assert!(retrieved_subfolder.contains(&file2_entry_hash)); - assert_eq!( - retrieved_subfolder.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - - assert_eq!( - retrieved_folder.entries().await?, - vec![ - (file1_entry_hash, (file1_meta_xorname, file1_metadata)), - ( - subfolder_entry_hash, - (subfolder_meta_xorname, subfolder_metadata) - ), - ] - .into_iter() - .collect() - ); - assert_eq!( - retrieved_subfolder.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_merge_changes() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_merge_changes", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk.clone()).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut rng = rand::thread_rng(); - let owner_pk = owner_sk.public_key(); - let folder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let subfolder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - - let mut folder_a = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_a = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_a1_chunk = random_file_chunk(); - let file_a2_chunk = random_file_chunk(); - - let (file_a1_entry_hash, file_a1_meta_xorname, file_a1_metadata) = - 
folder_a.add_file("fileA1.txt".into(), file_a1_chunk.clone(), None)?; - let (subfolder_a_entry_hash, subfolder_a_meta_xorname, subfolder_a_metadata) = - folder_a.add_folder("subfolderA".into(), *subfolder_a.address(), None)?; - let (file_a2_entry_hash, file_a2_meta_xorname, file_a2_metadata) = - subfolder_a.add_file("fileA2.txt".into(), file_a2_chunk.clone(), None)?; - - let mut folder_b = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_b = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_b1_chunk = random_file_chunk(); - let file_b2_chunk = random_file_chunk(); - - let (file_b1_entry_hash, file_b1_meta_xorname, file_b1_metadata) = - folder_b.add_file("fileB1.txt".into(), file_b1_chunk.clone(), None)?; - let (subfolder_b_entry_hash, subfolder_b_meta_xorname, subfolder_b_metadata) = - folder_b.add_folder("subfolderB".into(), *subfolder_b.address(), None)?; - let (file_b2_entry_hash, file_b2_meta_xorname, file_b2_metadata) = - subfolder_b.add_file("fileB2.txt".into(), file_b2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder_a.as_net_addr(), subfolder_a.as_net_addr()]; - addrs2pay.extend(folder_a.meta_addrs_to_pay()); - addrs2pay.extend(subfolder_a.meta_addrs_to_pay()); - addrs2pay.extend(folder_b.meta_addrs_to_pay()); - addrs2pay.extend(subfolder_b.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - folder_b.sync(Default::default()).await?; - subfolder_b.sync(Default::default()).await?; - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - - let folder_a_entries = folder_a.entries().await?; - let folder_b_entries = folder_b.entries().await?; - let subfolder_a_entries = subfolder_a.entries().await?; - let subfolder_b_entries = subfolder_b.entries().await?; - - assert_eq!(folder_a_entries.len(), 4); - 
assert_eq!(folder_b_entries.len(), 4); - assert_eq!(subfolder_a_entries.len(), 2); - assert_eq!(subfolder_b_entries.len(), 2); - - assert!(folder_a.contains(&file_a1_entry_hash)); - assert!(folder_a.contains(&file_b1_entry_hash)); - assert!(folder_a.contains(&subfolder_a_entry_hash)); - assert!(folder_a.contains(&subfolder_b_entry_hash)); - assert!(subfolder_a.contains(&file_a2_entry_hash)); - assert!(subfolder_a.contains(&file_b2_entry_hash)); - - assert!(folder_b.contains(&file_a1_entry_hash)); - assert!(folder_b.contains(&file_b1_entry_hash)); - assert!(folder_b.contains(&subfolder_a_entry_hash)); - assert!(folder_b.contains(&subfolder_b_entry_hash)); - assert!(subfolder_b.contains(&file_a2_entry_hash)); - assert!(subfolder_b.contains(&file_b2_entry_hash)); - - assert_eq!( - folder_a.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!( - folder_b.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!(folder_a_entries, folder_b_entries); - assert_eq!( - folder_a_entries, - vec![ - (file_a1_entry_hash, (file_a1_meta_xorname, file_a1_metadata)), - (file_b1_entry_hash, (file_b1_meta_xorname, file_b1_metadata)), - ( - subfolder_a_entry_hash, - (subfolder_a_meta_xorname, subfolder_a_metadata) - ), - ( - 
subfolder_b_entry_hash, - (subfolder_b_meta_xorname, subfolder_b_metadata) - ), - ] - .into_iter() - .collect() - ); - - assert_eq!( - subfolder_a.find_by_name("fileA2.txt"), - Some((&file_a2_meta_xorname, &file_a2_metadata)) - ); - assert_eq!( - subfolder_a.find_by_name("fileB2.txt"), - Some((&file_b2_meta_xorname, &file_b2_metadata)) - ); - - assert_eq!(subfolder_a_entries, subfolder_b_entries); - assert_eq!( - subfolder_a_entries, - vec![ - (file_a2_entry_hash, (file_a2_meta_xorname, file_a2_metadata)), - (file_b2_entry_hash, (file_b2_meta_xorname, file_b2_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} diff --git a/sn_evm/CHANGELOG.md b/sn_evm/CHANGELOG.md new file mode 100644 index 0000000000..ec4c00a34f --- /dev/null +++ b/sn_evm/CHANGELOG.md @@ -0,0 +1,917 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [0.18.6](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.5...sn_transfers-v0.18.6) - 2024-06-04 + +### Other +- release +- release + +## [0.18.5](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.4...sn_transfers-v0.18.5) - 2024-06-04 + +### Fixed +- *(transfer)* mismatched key shall result in decryption error + +### Other +- *(transfer)* make discord_name decryption backward compatible + +## [0.18.4](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.3...sn_transfers-v0.18.4) - 2024-06-03 + +### Fixed +- enable compile time sk setting for faucet/genesis + +## [0.18.2](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.1...sn_transfers-v0.18.2) - 2024-06-03 + +### Added +- *(faucet)* write foundation cash note to disk +- *(keys)* enable compile or runtime override of keys + +### Other +- use secrets during build process + +## [0.18.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.0...sn_transfers-v0.18.1) - 2024-05-24 + +### Added +- use default keys for genesis, or override +- use different key for payment forward +- remove two uneeded env vars +- pass genesis_cn pub fields separate to hide sk +- hide genesis keypair +- hide genesis keypair +- pass sk_str via cli opt +- *(node)* use separate keys of Foundation and Royalty +- *(wallet)* ensure genesis wallet attempts to load from local on init first +- *(faucet)* make gifting server feat dependent +- tracking beta rewards from the DAG +- *(audit)* collect payment forward statistics +- *(node)* periodically forward reward to specific address +- spend reason enum and sized cipher + +### Fixed +- correct genesis_pk naming +- genesis_cn public fields generated from hard coded value +- invalid spend reason in data payments + +### Other +- *(transfers)* comment and naming updates for clarity +- log genesis PK +- rename improperly named foundation_key +- reconfigure local network owner args +- *(refactor)* 
stabilise node size to 4k records, +- use const for default user or owner +- resolve errors after reverts +- Revert "feat(node): make spend and cash_note reason field configurable" +- Revert "feat: spend shows the purposes of outputs created for" +- Revert "chore: rename output reason to purpose for clarity" +- Revert "feat(cli): track spend creation reasons during audit" +- Revert "chore: refactor CASH_NOTE_REASON strings to consts" +- Revert "chore: address review comments" +- *(node)* use proper SpendReason enum +- add consts + +## [0.18.0-alpha.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.18.0-alpha.0...sn_transfers-v0.18.0-alpha.1) - 2024-05-07 + +### Added +- *(cli)* track spend creation reasons during audit +- spend shows the purposes of outputs created for +- *(node)* make spend and cash_note reason field configurable +- *(cli)* generate a mnemonic as wallet basis if no wallet found +- *(transfers)* do not genereate wallet by default +- [**breaking**] renamings in CashNote +- [**breaking**] rename token to amount in Spend +- unit testing dag, double spend poisoning tweaks + +### Fixed +- create faucet via account load or generation +- transfer tests for HotWallet creation +- *(client)* move acct_packet mnemonic into client layer +- typo + +### Other +- *(versions)* sync versions with latest crates.io vs +- address review comments +- refactor CASH_NOTE_REASON strings to consts +- rename output reason to purpose for clarity +- addres review comments +- *(transfers)* reduce error size +- *(deps)* bump dependencies +- *(transfer)* unit tests for PaymentQuote +- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 +- *(release)* 
sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 + +## [0.17.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.17.0...sn_transfers-v0.17.1) - 2024-03-28 + +### Added +- *(transfers)* implement WalletApi to expose common methods + +### Fixed +- *(uploader)* clarify the use of root and wallet dirs + +## [0.17.0](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.5...sn_transfers-v0.17.0) - 2024-03-27 + +### Added +- *(faucet)* rate limit based upon wallet locks +- *(transfers)* enable client to check if a quote has expired +- *(transfers)* [**breaking**] support multiple payments for the same xorname +- use Arc inside Client, Network to reduce clone cost + +### Other +- *(node)* refactor pricing metrics + +## [0.16.5](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.4...sn_transfers-v0.16.5) - 2024-03-21 + +### Added +- refactor DAG, improve error management and security +- dag error recording + +## [0.16.4](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.3...sn_transfers-v0.16.4) - 2024-03-14 + +### Added +- refactor spend validation + +### Other +- improve code quality + +## [0.16.3-alpha.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.3-alpha.0...sn_transfers-v0.16.3-alpha.1) - 2024-03-08 + +### Added +- [**breaking**] pretty serialisation for unique keys + +## [0.16.2](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.1...sn_transfers-v0.16.2) - 2024-03-06 + +### Other +- clean swarm commands errs and spend errors + +## [0.16.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.0...sn_transfers-v0.16.1) - 
2024-03-05 + +### Added +- provide `faucet add` command + +## [0.16.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.9...sn_transfers-v0.16.0) - 2024-02-23 + +### Added +- use the old serialisation as default, add some docs +- warn about old format when detected +- implement backwards compatible deserialisation +- [**breaking**] custom serde for unique keys + +## [0.15.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.7...sn_transfers-v0.15.8) - 2024-02-20 + +### Added +- spend and DAG utilities + +## [0.15.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.6...sn_transfers-v0.15.7) - 2024-02-20 + +### Added +- *(folders)* move folders/files metadata out of Folders entries + +## [0.15.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.5...sn_transfers-v0.15.6) - 2024-02-15 + +### Added +- *(client)* keep payee as part of storage payment cache + +### Other +- minor doc change based on peer review + +## [0.15.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.4...sn_transfers-v0.15.5) - 2024-02-14 + +### Other +- *(refactor)* move mod.rs files the modern way + +## [0.15.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.3...sn_transfers-v0.15.4) - 2024-02-13 + +### Fixed +- manage the genesis spend case + +## [0.15.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.2...sn_transfers-v0.15.3) - 2024-02-08 + +### Other +- copyright update to current year + +## [0.15.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.1...sn_transfers-v0.15.2) - 2024-02-07 + +### Added +- extendable local state DAG in cli + +## [0.15.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.0...sn_transfers-v0.15.1) - 2024-02-06 + +### Fixed +- *(node)* derive reward_key from main keypair + +## [0.15.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.43...sn_transfers-v0.15.0) - 2024-02-02 + +### 
Other +- *(cli)* minor changes to cli comments +- [**breaking**] renaming LocalWallet to HotWallet as it holds the secret key for signing tx +- *(readme)* add instructions of out-of-band transaction signing + +## [0.14.43](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.42...sn_transfers-v0.14.43) - 2024-01-29 + +### Other +- *(sn_transfers)* making some functions/helpers to be constructor methods of public structs + +## [0.14.42](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.41...sn_transfers-v0.14.42) - 2024-01-25 + +### Added +- client webtransport-websys feat + +## [0.14.41](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.40...sn_transfers-v0.14.41) - 2024-01-24 + +### Fixed +- dont lock files with wasm + +### Other +- make tokio dev dep for transfers + +## [0.14.40](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.39...sn_transfers-v0.14.40) - 2024-01-22 + +### Added +- spend dag utils + +## [0.14.39](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.38...sn_transfers-v0.14.39) - 2024-01-18 + +### Added +- *(faucet)* download snapshot of maid balances + +## [0.14.38](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.37...sn_transfers-v0.14.38) - 2024-01-16 + +### Fixed +- *(wallet)* remove unconfirmed_spends file from disk when all confirmed + +## [0.14.37](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.36...sn_transfers-v0.14.37) - 2024-01-15 + +### Fixed +- *(client)* do not store paying-out cash_notes into disk +- *(client)* cache payments via disk instead of memory map + +### Other +- *(client)* collect wallet handling time statistics + +## [0.14.36](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.35...sn_transfers-v0.14.36) - 2024-01-10 + +### Added +- *(transfers)* exposing APIs to build and send cashnotes from transactions signed offline +- *(transfers)* include the derivation index of inputs for 
generated unsigned transactions +- *(transfers)* exposing an API to create unsigned transfers to be signed offline later on + +### Other +- fixup send_spends and use ExcessiveNanoValue error +- *(transfers)* solving clippy issues about complex fn args + +## [0.14.35](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.34...sn_transfers-v0.14.35) - 2024-01-09 + +### Added +- *(client)* extra sleep between chunk verification + +## [0.14.34](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.33...sn_transfers-v0.14.34) - 2024-01-09 + +### Added +- *(cli)* safe wallet create saves new key + +## [0.14.33](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.32...sn_transfers-v0.14.33) - 2024-01-08 + +### Other +- more doc updates to readme files + +## [0.14.32](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.31...sn_transfers-v0.14.32) - 2024-01-05 + +### Other +- add clippy unwrap lint to workspace + +## [0.14.31](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.30...sn_transfers-v0.14.31) - 2023-12-19 + +### Added +- network royalties through audit POC + +## [0.14.30](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.29...sn_transfers-v0.14.30) - 2023-12-18 + +### Added +- *(transfers)* spent keys and created for others removed +- *(transfers)* add api for cleaning up CashNotes + +## [0.14.29](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.28...sn_transfers-v0.14.29) - 2023-12-14 + +### Other +- *(protocol)* print the first six hex characters for every address type + +## [0.14.28](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.27...sn_transfers-v0.14.28) - 2023-12-12 + +### Added +- *(transfers)* make wallet read resiliant to concurrent writes + +## [0.14.27](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.26...sn_transfers-v0.14.27) - 2023-12-06 + +### Added +- *(wallet)* basic impl of a watch-only 
wallet API + +### Other +- *(wallet)* adding unit tests for watch-only wallet impl. +- *(wallet)* another refactoring removing more redundant and unused wallet code +- *(wallet)* major refactoring removing redundant and unused code + +## [0.14.26](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.25...sn_transfers-v0.14.26) - 2023-12-06 + +### Other +- remove some needless cloning +- remove needless pass by value +- use inline format args +- add boilerplate for workspace lints + +## [0.14.25](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.24...sn_transfers-v0.14.25) - 2023-12-05 + +### Fixed +- protect against amounts tampering and incomplete spends attack + +## [0.14.24](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.23...sn_transfers-v0.14.24) - 2023-12-05 + +### Other +- *(transfers)* tidier debug methods for Transactions + +## [0.14.23](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.22...sn_transfers-v0.14.23) - 2023-11-29 + +### Added +- verify all the way to genesis +- verify spends through the cli + +### Fixed +- genesis check security flaw + +## [0.14.22](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.21...sn_transfers-v0.14.22) - 2023-11-28 + +### Added +- *(transfers)* serialise wallets and transfers data with MsgPack instead of bincode + +## [0.14.21](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.20...sn_transfers-v0.14.21) - 2023-11-23 + +### Added +- move derivation index random method to itself + +## [0.14.20](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.19...sn_transfers-v0.14.20) - 2023-11-22 + +### Other +- optimise log format of DerivationIndex + +## [0.14.19](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.18...sn_transfers-v0.14.19) - 2023-11-20 + +### Added +- *(networking)* shortcircuit response sending for replication + +## 
[0.14.18](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.17...sn_transfers-v0.14.18) - 2023-11-20 + +### Added +- quotes + +### Fixed +- use actual quote instead of dummy + +## [0.14.17](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.16...sn_transfers-v0.14.17) - 2023-11-16 + +### Added +- massive cleaning to prepare for quotes + +### Fixed +- wrong royalty amount +- cashnote mixup when 2 of them are for the same node + +## [0.14.16](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.15...sn_transfers-v0.14.16) - 2023-11-15 + +### Added +- *(royalties)* make royalties payment to be 15% of the total storage cost + +## [0.14.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.14...sn_transfers-v0.14.15) - 2023-11-14 + +### Other +- *(royalties)* verify royalties fees amounts + +## [0.14.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.13...sn_transfers-v0.14.14) - 2023-11-10 + +### Added +- *(cli)* attempt to reload wallet from disk if storing it fails when receiving transfers online +- *(cli)* new cmd to listen to royalties payments and deposit them into a local wallet + +## [0.14.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.12...sn_transfers-v0.14.13) - 2023-11-10 + +### Other +- *(transfers)* more logs around payments... 
+ +## [0.14.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.11...sn_transfers-v0.14.12) - 2023-11-09 + +### Other +- simplify when construct payess for storage + +## [0.14.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.10...sn_transfers-v0.14.11) - 2023-11-02 + +### Added +- keep transfers in mem instead of heavy cashnotes + +## [0.14.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.9...sn_transfers-v0.14.10) - 2023-11-01 + +### Other +- *(node)* don't log the transfers events + +## [0.14.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.8...sn_transfers-v0.14.9) - 2023-10-30 + +### Added +- `bincode::serialize` into `Bytes` without intermediate allocation + +## [0.14.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.7...sn_transfers-v0.14.8) - 2023-10-27 + +### Added +- *(rpc_client)* show total accumulated balance when decrypting transfers received + +## [0.14.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.6...sn_transfers-v0.14.7) - 2023-10-26 + +### Fixed +- typos + +## [0.14.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.5...sn_transfers-v0.14.6) - 2023-10-24 + +### Fixed +- *(tests)* nodes rewards tests to account for repayments amounts + +## [0.14.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.4...sn_transfers-v0.14.5) - 2023-10-24 + +### Added +- *(payments)* adding unencrypted CashNotes for network royalties and verifying correct payment +- *(payments)* network royalties payment made when storing content + +### Other +- *(api)* wallet APIs to account for network royalties fees when returning total cost paid for storage + +## [0.14.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.3...sn_transfers-v0.14.4) - 2023-10-24 + +### Fixed +- *(networking)* only validate _our_ transfers at nodes + +## 
[0.14.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.2...sn_transfers-v0.14.3) - 2023-10-18 + +### Other +- Revert "feat: keep transfers in mem instead of mem and i/o heavy cashnotes" + +## [0.14.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.1...sn_transfers-v0.14.2) - 2023-10-18 + +### Added +- keep transfers in mem instead of mem and i/o heavy cashnotes + +## [0.14.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.0...sn_transfers-v0.14.1) - 2023-10-17 + +### Fixed +- *(transfers)* dont overwrite existing payment transactions when we top up + +### Other +- adding comments and cleanup around quorum / payment fixes + +## [0.14.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.12...sn_transfers-v0.14.0) - 2023-10-12 + +### Added +- *(sn_transfers)* dont load Cns from disk, store value along w/ pubkey in wallet +- include protection for deposits + +### Fixed +- remove unneeded hideous key Clone trait +- deadlock +- place lock on another file to prevent windows lock issue +- lock wallet file instead of dir +- wallet concurrent access bugs + +### Other +- more detailed logging when client creating store cash_note + +## [0.13.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.11...sn_transfers-v0.13.12) - 2023-10-11 + +### Fixed +- expose RecordMismatch errors and cleanup wallet if we hit that + +### Other +- *(transfers)* add some more clarity around DoubleSpendAttemptedForCashNotes +- *(docs)* cleanup comments and docs +- *(transfers)* remove pointless api + +## [0.13.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.10...sn_transfers-v0.13.11) - 2023-10-10 + +### Added +- *(transfer)* special event for transfer notifs over gossipsub + +## [0.13.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.9...sn_transfers-v0.13.10) - 2023-10-10 + +### Other +- *(sn_transfers)* improve transaction build mem perf + +## 
[0.13.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.8...sn_transfers-v0.13.9) - 2023-10-06 + +### Added +- feat!(sn_transfers): unify store api for wallet + +### Fixed +- readd api to load cash_notes from disk, update tests + +### Other +- update comments around RecordNotFound +- remove deposit vs received cashnote disctinction + +## [0.13.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.7...sn_transfers-v0.13.8) - 2023-10-06 + +### Other +- fix new clippy errors + +## [0.13.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.6...sn_transfers-v0.13.7) - 2023-10-05 + +### Added +- *(metrics)* enable node monitoring through dockerized grafana instance + +## [0.13.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.5...sn_transfers-v0.13.6) - 2023-10-05 + +### Fixed +- *(client)* remove concurrency limitations + +## [0.13.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.4...sn_transfers-v0.13.5) - 2023-10-05 + +### Fixed +- *(sn_transfers)* be sure we store CashNotes before writing the wallet file + +## [0.13.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.3...sn_transfers-v0.13.4) - 2023-10-05 + +### Added +- use progress bars on `files upload` + +## [0.13.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.2...sn_transfers-v0.13.3) - 2023-10-04 + +### Added +- *(sn_transfers)* impl From for NanoTokens + +### Fixed +- *(sn_transfers)* reuse payment overflow fix + +### Other +- *(sn_transfers)* clippy and fmt +- *(sn_transfers)* add reuse cashnote cases +- separate method and write test + +## [0.13.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.1...sn_transfers-v0.13.2) - 2023-10-02 + +### Added +- remove unused fee output + +## [0.13.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.0...sn_transfers-v0.13.1) - 2023-09-28 + +### Added +- client to client transfers + +## 
[0.13.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.2...sn_transfers-v0.13.0) - 2023-09-27 + +### Added +- deep clean sn_transfers, reduce exposition, remove dead code + +### Fixed +- benches +- uncomment benches in Cargo.toml + +### Other +- optimise bench +- improve cloning +- udeps + +## [0.12.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.1...sn_transfers-v0.12.2) - 2023-09-25 + +### Other +- *(transfers)* unused variable removal + +## [0.12.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.0...sn_transfers-v0.12.1) - 2023-09-25 + +### Other +- udeps +- cleanup renamings in sn_transfers +- remove mostly outdated mocks + +## [0.12.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.15...sn_transfers-v0.12.0) - 2023-09-21 + +### Added +- rename utxo by CashNoteRedemption +- dusking DBCs + +### Fixed +- udeps +- incompatible hardcoded value, add logs + +### Other +- remove dbc dust comments +- rename Nano NanoTokens +- improve naming + +## [0.11.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.14...sn_transfers-v0.11.15) - 2023-09-20 + +### Other +- major dep updates + +## [0.11.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.13...sn_transfers-v0.11.14) - 2023-09-18 + +### Added +- serialisation for transfers for out of band sending +- generic transfer receipt + +### Other +- add more docs +- add some docs + +## [0.11.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.12...sn_transfers-v0.11.13) - 2023-09-15 + +### Other +- refine log levels + +## [0.11.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.11...sn_transfers-v0.11.12) - 2023-09-14 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.10...sn_transfers-v0.11.11) - 2023-09-13 + +### Added +- *(register)* paying nodes for Register 
storage + +## [0.11.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.9...sn_transfers-v0.11.10) - 2023-09-12 + +### Added +- add tx and parent spends verification +- chunk payments using UTXOs instead of DBCs + +### Other +- use updated sn_dbc + +## [0.11.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.8...sn_transfers-v0.11.9) - 2023-09-11 + +### Other +- *(release)* sn_cli-v0.81.29/sn_client-v0.88.16/sn_registers-v0.2.6/sn_node-v0.89.29/sn_testnet-v0.2.120/sn_protocol-v0.6.6 + +## [0.11.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.7...sn_transfers-v0.11.8) - 2023-09-08 + +### Added +- *(client)* repay for chunks if they cannot be validated + +## [0.11.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.6...sn_transfers-v0.11.7) - 2023-09-05 + +### Other +- *(release)* sn_cli-v0.81.21/sn_client-v0.88.11/sn_registers-v0.2.5/sn_node-v0.89.21/sn_testnet-v0.2.112/sn_protocol-v0.6.5 + +## [0.11.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.5...sn_transfers-v0.11.6) - 2023-09-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.4...sn_transfers-v0.11.5) - 2023-09-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.3...sn_transfers-v0.11.4) - 2023-09-01 + +### Other +- *(transfers)* batch dbc storage +- *(transfers)* store dbcs by ref to avoid more clones +- *(transfers)* dont pass by value, this is a clone! 
+- *(client)* make unconfonfirmed txs btreeset, remove unnecessary cloning +- *(transfers)* improve update_local_wallet + +## [0.11.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.2...sn_transfers-v0.11.3) - 2023-08-31 + +### Other +- remove unused async + +## [0.11.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.1...sn_transfers-v0.11.2) - 2023-08-31 + +### Added +- *(node)* node to store rewards in a local wallet + +### Fixed +- *(cli)* don't try to create wallet paths when checking balance + +## [0.11.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.0...sn_transfers-v0.11.1) - 2023-08-31 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.28...sn_transfers-v0.11.0) - 2023-08-30 + +### Added +- one transfer per data set, mapped dbcs to content addrs +- [**breaking**] pay each chunk holder direct +- feat!(protocol): gets keys with GetStoreCost +- feat!(protocol): get price and pay for each chunk individually +- feat!(protocol): remove chunk merkletree to simplify payment + +### Fixed +- *(tokio)* remove tokio fs + +### Other +- *(deps)* bump tokio to 1.32.0 +- *(client)* refactor client wallet to reduce dbc clones +- *(client)* pass around content payments map mut ref +- *(client)* error out early for invalid transfers + +## [0.10.28](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.27...sn_transfers-v0.10.28) - 2023-08-24 + +### Other +- rust 1.72.0 fixes + +## [0.10.27](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.26...sn_transfers-v0.10.27) - 2023-08-18 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.26](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.25...sn_transfers-v0.10.26) - 2023-08-11 + +### Added +- *(transfers)* add resend loop for unconfirmed txs + +## 
[0.10.25](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.24...sn_transfers-v0.10.25) - 2023-08-10 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.24](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.23...sn_transfers-v0.10.24) - 2023-08-08 + +### Added +- *(transfers)* add get largest dbc for spending + +### Fixed +- *(node)* prevent panic in storage calcs + +### Other +- *(faucet)* provide more money +- tidy store cost code + +## [0.10.23](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.22...sn_transfers-v0.10.23) - 2023-08-07 + +### Other +- rename network addresses confusing name method to xorname + +## [0.10.22](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.21...sn_transfers-v0.10.22) - 2023-08-01 + +### Other +- *(networking)* use TOTAL_SUPPLY from sn_transfers + +## [0.10.21](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.20...sn_transfers-v0.10.21) - 2023-08-01 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.20](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.19...sn_transfers-v0.10.20) - 2023-08-01 + +### Other +- *(release)* sn_cli-v0.80.17/sn_client-v0.87.0/sn_registers-v0.2.0/sn_node-v0.88.6/sn_testnet-v0.2.44/sn_protocol-v0.4.2 + +## [0.10.19](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.18...sn_transfers-v0.10.19) - 2023-07-31 + +### Fixed +- *(test)* using proper wallets during data_with_churn test + +## [0.10.18](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.17...sn_transfers-v0.10.18) - 2023-07-28 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.17](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.16...sn_transfers-v0.10.17) - 2023-07-26 + +### Other +- updated the following local packages: sn_protocol + +## 
[0.10.16](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.15...sn_transfers-v0.10.16) - 2023-07-25 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.14...sn_transfers-v0.10.15) - 2023-07-21 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.13...sn_transfers-v0.10.14) - 2023-07-20 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.12...sn_transfers-v0.10.13) - 2023-07-19 + +### Added +- *(CI)* dbc verfication during network churning test + +## [0.10.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.11...sn_transfers-v0.10.12) - 2023-07-19 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.10...sn_transfers-v0.10.11) - 2023-07-18 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.9...sn_transfers-v0.10.10) - 2023-07-17 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.8...sn_transfers-v0.10.9) - 2023-07-17 + +### Added +- *(client)* keep storage payment proofs in local wallet + +## [0.10.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.7...sn_transfers-v0.10.8) - 2023-07-12 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.6...sn_transfers-v0.10.7) - 2023-07-11 + +### Other +- updated the following local packages: sn_protocol + +## 
[0.10.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.5...sn_transfers-v0.10.6) - 2023-07-10 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.4...sn_transfers-v0.10.5) - 2023-07-06 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.3...sn_transfers-v0.10.4) - 2023-07-05 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.2...sn_transfers-v0.10.3) - 2023-07-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.1...sn_transfers-v0.10.2) - 2023-06-28 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.0...sn_transfers-v0.10.1) - 2023-06-26 + +### Added +- display path when no deposits were found upon wallet deposit failure + +### Other +- adding proptests for payment proofs merkletree utilities +- payment proof map to use xorname as index instead of merkletree nodes type +- having the payment proof validation util to return the item's leaf index + +## [0.10.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.8...sn_transfers-v0.10.0) - 2023-06-22 + +### Added +- use standarised directories for files/wallet commands + +## [0.9.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.7...sn_transfers-v0.9.8) - 2023-06-21 + +### Other +- updated the following local packages: sn_protocol + +## [0.9.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.6...sn_transfers-v0.9.7) - 2023-06-21 + +### Fixed +- *(sn_transfers)* hardcode new genesis DBC for tests + +### Other +- *(node)* obtain parent_tx from SignedSpend + +## 
[0.9.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.5...sn_transfers-v0.9.6) - 2023-06-20 + +### Other +- updated the following local packages: sn_protocol + +## [0.9.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.4...sn_transfers-v0.9.5) - 2023-06-20 + +### Other +- specific error types for different payment proof verification scenarios + +## [0.9.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.3...sn_transfers-v0.9.4) - 2023-06-15 + +### Added +- add double spend test + +### Fixed +- parent spend checks +- parent spend issue + +## [0.9.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.2...sn_transfers-v0.9.3) - 2023-06-14 + +### Added +- include output DBC within payment proof for Chunks storage + +## [0.9.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.1...sn_transfers-v0.9.2) - 2023-06-12 + +### Added +- remove spendbook rw locks, improve logging + +## [0.9.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.0...sn_transfers-v0.9.1) - 2023-06-09 + +### Other +- manually change crate version diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml new file mode 100644 index 0000000000..73326d9f36 --- /dev/null +++ b/sn_evm/Cargo.toml @@ -0,0 +1,41 @@ +[package] +authors = ["MaidSafe Developers "] +description = "Safe Network EVM Transfers" +documentation = "https://docs.rs/sn_node" +edition = "2021" +homepage = "https://maidsafe.net" +license = "GPL-3.0" +name = "sn_evm" +readme = "README.md" +repository = "https://github.com/maidsafe/safe_network" +version = "0.1.1" + +[features] +test-utils = [] +local = ["evmlib/local"] + +[dependencies] +custom_debug = "~0.6.1" +evmlib = { path = "../evmlib", version = "0.1.1" } +hex = "~0.4.3" +lazy_static = "~1.4.0" +libp2p = { version = "0.53", features = ["identify", "kad"] } +rand = { version = "~0.8.5", features = ["small_rng"] } +rmp-serde = "1.1.1" +serde = { version = "1.0.133", features = 
["derive", "rc"] } +serde_json = "1.0.108" +thiserror = "1.0.24" +tiny-keccak = { version = "~2.0.2", features = ["sha3"] } +tracing = { version = "~0.1.26" } +xor_name = "5.0.0" +ring = "0.17.8" +tempfile = "3.10.1" + +[dev-dependencies] +tokio = { version = "1.32.0", features = ["macros", "rt"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasmtimer = { version = "0.2.0", features = ["serde"] } + +[lints] +workspace = true diff --git a/sn_evm/README.md b/sn_evm/README.md new file mode 100644 index 0000000000..f17bfee940 --- /dev/null +++ b/sn_evm/README.md @@ -0,0 +1,46 @@ +# Safe Network EVM data payments + +This crate contains the logic for data payments on the SAFE Network using the Ethereum protocol. + +This crate provides a set of types and utilities for interacting with EVM-based networks. It offers abstraction over common tasks such as handling addresses, wallets, payments, and network configurations. Below is an overview of the main types exposed by the crate. + +## Exposed Types + +### RewardsAddress +Alias for `evmlib::common::Address`. Represents an EVM-compatible address used for handling rewards. + +### QuoteHash +Represents a unique hash identifying a quote. Useful for referencing and verifying `PaymentQuote`. + +### TxHash +Represents the transaction hash. Useful for identifying and tracking transactions on the blockchain. + +### EvmWallet +Alias for `evmlib::wallet::Wallet`. A wallet used to interact with EVM-compatible networks, providing key management and signing functionality. + +### EvmNetworkCustom +A custom network type that allows for interaction with custom EVM-based networks. + +### EvmNetwork +A standard network type for EVM-based networks such as Ethereum or ArbitrumOne. + +### PaymentQuote +Represents a quote for a payment transaction. Contains relevant data for processing payments through EVM-based networks. + +### QuotingMetrics +Represents metrics associated with generating a payment quote. 
Useful for performance measurement and optimization. + +### ProofOfPayment +Contains proof of a successful payment on an EVM-based network. Includes data like transaction hash and confirmation details. + +### Amount +Represents a general amount of tokens. Can be used to define any token value in a flexible way. + +### AttoTokens +Represents an amount in the smallest token unit, commonly "atto" (10^-18). Useful for working with precise amounts in smart contracts. + +### EvmError +A custom error type used for handling EVM-related errors within the library. + +### Result +A specialized `Result` type that wraps around `EvmError`. Standardizes error handling across operations. diff --git a/sn_evm/src/amount.rs b/sn_evm/src/amount.rs new file mode 100644 index 0000000000..be25546042 --- /dev/null +++ b/sn_evm/src/amount.rs @@ -0,0 +1,259 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::{EvmError, Result}; + +pub use evmlib::common::Amount; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + +/// The conversion from AttoTokens to raw value +const TOKEN_TO_RAW_POWER_OF_10_CONVERSION: u64 = 18; +/// The conversion from AttoTokens to raw value +const TOKEN_TO_RAW_CONVERSION: u64 = 1_000_000_000_000_000_000; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +/// An amount in SNT Atto. 10^18 Nanos = 1 SNT. 
+pub struct AttoTokens(Amount); + +impl AttoTokens { + /// Type safe representation of zero AttoTokens. + pub const fn zero() -> Self { + Self(Amount::ZERO) + } + + /// Returns whether it's a representation of zero AttoTokens. + pub fn is_zero(&self) -> bool { + self.0.is_zero() + } + + /// New value from an amount + pub fn from_atto(value: Amount) -> Self { + Self(value) + } + + /// New value from a number of atto tokens. + pub fn from_u64(value: u64) -> Self { + Self(Amount::from(value)) + } + + /// New value from a number of atto tokens. + pub fn from_u128(value: u128) -> Self { + Self(Amount::from(value)) + } + + /// Total AttoTokens expressed in number of nano tokens. + pub fn as_atto(self) -> Amount { + self.0 + } + + /// Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(self, rhs: AttoTokens) -> Option { + self.0.checked_add(rhs.0).map(Self::from_atto) + } + + /// Computes `self - rhs`, returning `None` if overflow occurred. + pub fn checked_sub(self, rhs: AttoTokens) -> Option { + self.0.checked_sub(rhs.0).map(Self::from_atto) + } + + /// Converts the Nanos into bytes + pub fn to_bytes(&self) -> Vec { + self.0.as_le_bytes().to_vec() + } +} + +impl From for AttoTokens { + fn from(value: u64) -> Self { + Self(Amount::from(value)) + } +} + +impl From for AttoTokens { + fn from(value: Amount) -> Self { + Self(value) + } +} + +impl FromStr for AttoTokens { + type Err = EvmError; + + fn from_str(value_str: &str) -> Result { + let mut itr = value_str.splitn(2, '.'); + let converted_units = { + let units = itr + .next() + .and_then(|s| s.parse::().ok()) + .ok_or_else(|| { + EvmError::FailedToParseAttoToken("Can't parse token units".to_string()) + })?; + + units + .checked_mul(Amount::from(TOKEN_TO_RAW_CONVERSION)) + .ok_or(EvmError::ExcessiveValue)? 
+ }; + + let remainder = { + let remainder_str = itr.next().unwrap_or_default().trim_end_matches('0'); + + if remainder_str.is_empty() { + Amount::ZERO + } else { + let parsed_remainder = remainder_str.parse::().map_err(|_| { + EvmError::FailedToParseAttoToken("Can't parse token remainder".to_string()) + })?; + + let remainder_conversion = TOKEN_TO_RAW_POWER_OF_10_CONVERSION + .checked_sub(remainder_str.len() as u64) + .ok_or(EvmError::LossOfPrecision)?; + parsed_remainder * Amount::from(10).pow(Amount::from(remainder_conversion)) + } + }; + + Ok(Self(converted_units + remainder)) + } +} + +impl Display for AttoTokens { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + let unit = self.0 / Amount::from(TOKEN_TO_RAW_CONVERSION); + let remainder = self.0 % Amount::from(TOKEN_TO_RAW_CONVERSION); + write!(formatter, "{unit}.{remainder:09}") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn from_str() -> Result<()> { + assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0")?); + assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0.")?); + assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0.0")?); + assert_eq!( + AttoTokens::from_u64(1), + AttoTokens::from_str("0.000000000000000001")? + ); + assert_eq!( + AttoTokens::from_u64(1_000_000_000_000_000_000), + AttoTokens::from_str("1")? + ); + assert_eq!( + AttoTokens::from_u64(1_000_000_000_000_000_000), + AttoTokens::from_str("1.")? + ); + assert_eq!( + AttoTokens::from_u64(1_000_000_000_000_000_000), + AttoTokens::from_str("1.0")? + ); + assert_eq!( + AttoTokens::from_u64(1_000_000_000_000_000_001), + AttoTokens::from_str("1.000000000000000001")? + ); + assert_eq!( + AttoTokens::from_u64(1_100_000_000), + AttoTokens::from_str("1.1")? + ); + assert_eq!( + AttoTokens::from_u64(1_100_000_000_000_000_001), + AttoTokens::from_str("1.100000000000000001")? + ); + assert_eq!( + AttoTokens::from_u128(4_294_967_295_000_000_000_000_000_000u128), + AttoTokens::from_str("4294967295")? 
+ ); + assert_eq!( + AttoTokens::from_u128(4_294_967_295_999_999_999_000_000_000_000_000u128), + AttoTokens::from_str("4294967295.999999999")?, + ); + assert_eq!( + AttoTokens::from_u128(4_294_967_295_999_999_999_000_000_000_000_000u128), + AttoTokens::from_str("4294967295.9999999990000")?, + ); + + assert_eq!( + Err(EvmError::FailedToParseAttoToken( + "Can't parse token units".to_string() + )), + AttoTokens::from_str("a") + ); + assert_eq!( + Err(EvmError::FailedToParseAttoToken( + "Can't parse token remainder".to_string() + )), + AttoTokens::from_str("0.a") + ); + assert_eq!( + Err(EvmError::FailedToParseAttoToken( + "Can't parse token remainder".to_string() + )), + AttoTokens::from_str("0.0.0") + ); + assert_eq!( + Err(EvmError::LossOfPrecision), + AttoTokens::from_str("0.0000000009") + ); + assert_eq!( + Err(EvmError::ExcessiveValue), + AttoTokens::from_str("18446744074") + ); + Ok(()) + } + + #[test] + fn display() { + assert_eq!("0.000000000", format!("{}", AttoTokens::from_u64(0))); + assert_eq!("0.000000001", format!("{}", AttoTokens::from_u64(1))); + assert_eq!("0.000000010", format!("{}", AttoTokens::from_u64(10))); + assert_eq!( + "1.000000000", + format!("{}", AttoTokens::from_u64(1_000_000_000_000_000_000)) + ); + assert_eq!( + "1.000000001", + format!("{}", AttoTokens::from_u64(1_000_000_000_000_000_001)) + ); + assert_eq!( + "4294967295.000000000", + format!("{}", AttoTokens::from_u64(4_294_967_295_000_000_000)) + ); + } + + #[test] + fn checked_add_sub() { + assert_eq!( + Some(AttoTokens::from_u64(3)), + AttoTokens::from_u64(1).checked_add(AttoTokens::from_u64(2)) + ); + assert_eq!( + None, + AttoTokens::from_u64(u64::MAX).checked_add(AttoTokens::from_u64(1)) + ); + assert_eq!( + None, + AttoTokens::from_u64(u64::MAX).checked_add(AttoTokens::from_u64(u64::MAX)) + ); + + assert_eq!( + Some(AttoTokens::from_u64(0)), + AttoTokens::from_u64(u64::MAX).checked_sub(AttoTokens::from_u64(u64::MAX)) + ); + assert_eq!( + None, + 
AttoTokens::from_u64(0).checked_sub(AttoTokens::from_u64(u64::MAX)) + ); + assert_eq!( + None, + AttoTokens::from_u64(10).checked_sub(AttoTokens::from_u64(11)) + ); + } +} diff --git a/sn_evm/src/data_payments.rs b/sn_evm/src/data_payments.rs new file mode 100644 index 0000000000..688d11b621 --- /dev/null +++ b/sn_evm/src/data_payments.rs @@ -0,0 +1,376 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::{AttoTokens, EvmError}; +use evmlib::common::TxHash; +use evmlib::{ + common::{Address as RewardsAddress, QuoteHash}, + utils::dummy_address, +}; +use libp2p::{identity::PublicKey, PeerId}; +use serde::{Deserialize, Serialize}; +#[cfg(not(target_arch = "wasm32"))] +pub use std::time::SystemTime; +#[cfg(target_arch = "wasm32")] +pub use wasmtimer::std::SystemTime; +use xor_name::XorName; + +/// The time in seconds that a quote is valid for +pub const QUOTE_EXPIRATION_SECS: u64 = 3600; + +/// The margin allowed for live_time +const LIVE_TIME_MARGIN: u64 = 10; + +/// The proof of payment for a data payment +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] +pub struct ProofOfPayment { + /// The Quote we're paying for + pub quote: PaymentQuote, + /// The transaction hash + pub tx_hash: TxHash, +} + +impl ProofOfPayment { + pub fn to_peer_id_payee(&self) -> Option { + let pub_key = PublicKey::try_decode_protobuf(&self.quote.pub_key).ok()?; + Some(PeerId::from_public_key(&pub_key)) + } +} + +/// Quoting metrics that got used to generate a quote, or to track 
peer's status. +#[derive( + Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize, custom_debug::Debug, +)] +pub struct QuotingMetrics { + /// the records stored + pub close_records_stored: usize, + /// the max_records configured + pub max_records: usize, + /// number of times that got paid + pub received_payment_count: usize, + /// the duration that node keeps connected to the network, measured in hours + /// TODO: take `restart` into accout + pub live_time: u64, +} + +impl QuotingMetrics { + /// construct an empty QuotingMetrics + pub fn new() -> Self { + Self { + close_records_stored: 0, + max_records: 0, + received_payment_count: 0, + live_time: 0, + } + } +} + +impl Default for QuotingMetrics { + fn default() -> Self { + Self::new() + } +} + +/// A payment quote to store data given by a node to a client +/// Note that the PaymentQuote is a contract between the node and itself to make sure the clients aren’t mispaying. +/// It is NOT a contract between the client and the node. 
+#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, custom_debug::Debug)] +pub struct PaymentQuote { + /// the content paid for + pub content: XorName, + /// how much the node demands for storing the content + pub cost: AttoTokens, + /// the local node time when the quote was created + pub timestamp: SystemTime, + /// quoting metrics being used to generate this quote + pub quoting_metrics: QuotingMetrics, + /// list of bad_nodes that client shall not pick as a payee + /// in `serialised` format to avoid cyclic dependent on sn_protocol + #[debug(skip)] + pub bad_nodes: Vec, + /// the node's wallet address + pub rewards_address: RewardsAddress, + /// the node's libp2p identity public key in bytes (PeerId) + #[debug(skip)] + pub pub_key: Vec, + /// the node's signature for the quote + #[debug(skip)] + pub signature: Vec, +} + +impl PaymentQuote { + /// create an empty PaymentQuote + pub fn zero() -> Self { + Self { + content: Default::default(), + cost: AttoTokens::zero(), + timestamp: SystemTime::now(), + quoting_metrics: Default::default(), + bad_nodes: vec![], + rewards_address: dummy_address(), + pub_key: vec![], + signature: vec![], + } + } + + pub fn hash(&self) -> QuoteHash { + let mut bytes = self.bytes_for_sig(); + bytes.extend_from_slice(self.pub_key.as_slice()); + bytes.extend_from_slice(self.signature.as_slice()); + evmlib::cryptography::hash(bytes) + } + + /// returns the bytes to be signed from the given parameters + pub fn bytes_for_signing( + xorname: XorName, + cost: AttoTokens, + timestamp: SystemTime, + quoting_metrics: &QuotingMetrics, + serialised_bad_nodes: &[u8], + rewards_address: &RewardsAddress, + ) -> Vec { + let mut bytes = xorname.to_vec(); + bytes.extend_from_slice(&cost.to_bytes()); + bytes.extend_from_slice( + ×tamp + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Unix epoch to be in the past") + .as_secs() + .to_le_bytes(), + ); + let serialised_quoting_metrics = 
rmp_serde::to_vec(quoting_metrics).unwrap_or_default(); + bytes.extend_from_slice(&serialised_quoting_metrics); + bytes.extend_from_slice(serialised_bad_nodes); + bytes.extend_from_slice(rewards_address.as_slice()); + bytes + } + + /// Returns the bytes to be signed from self + pub fn bytes_for_sig(&self) -> Vec { + Self::bytes_for_signing( + self.content, + self.cost, + self.timestamp, + &self.quoting_metrics, + &self.bad_nodes, + &self.rewards_address, + ) + } + + /// Returns the peer id of the node that created the quote + pub fn peer_id(&self) -> Result { + if let Ok(pub_key) = libp2p::identity::PublicKey::try_decode_protobuf(&self.pub_key) { + Ok(PeerId::from(pub_key.clone())) + } else { + error!("Cann't parse PublicKey from protobuf"); + Err(EvmError::InvalidQuotePublicKey) + } + } + + /// Check self is signed by the claimed peer + pub fn check_is_signed_by_claimed_peer(&self, claimed_peer: PeerId) -> bool { + let pub_key = if let Ok(pub_key) = PublicKey::try_decode_protobuf(&self.pub_key) { + pub_key + } else { + error!("Cann't parse PublicKey from protobuf"); + return false; + }; + + let self_peer_id = PeerId::from(pub_key.clone()); + + if self_peer_id != claimed_peer { + error!("This quote {self:?} of {self_peer_id:?} is not signed by {claimed_peer:?}"); + return false; + } + + let bytes = self.bytes_for_sig(); + + if !pub_key.verify(&bytes, &self.signature) { + error!("Signature is not signed by claimed pub_key"); + return false; + } + + true + } + + /// Returns true) if the quote has not yet expired + pub fn has_expired(&self) -> bool { + let now = SystemTime::now(); + + let dur_s = match now.duration_since(self.timestamp) { + Ok(dur) => dur.as_secs(), + Err(_) => return true, + }; + dur_s > QUOTE_EXPIRATION_SECS + } + + /// test utility to create a dummy quote + pub fn test_dummy(xorname: XorName, cost: AttoTokens) -> Self { + Self { + content: xorname, + cost, + timestamp: SystemTime::now(), + quoting_metrics: Default::default(), + bad_nodes: vec![], + 
pub_key: vec![], + signature: vec![], + rewards_address: dummy_address(), + } + } + + /// Check whether self is newer than the target quote. + pub fn is_newer_than(&self, other: &Self) -> bool { + self.timestamp > other.timestamp + } + + /// Check against a new quote, verify whether it is a valid one from self perspective. + /// Returns `true` to flag the `other` quote is valid, from self perspective. + pub fn historical_verify(&self, other: &Self) -> bool { + // There is a chance that an old quote got used later than a new quote + let self_is_newer = self.is_newer_than(other); + let (old_quote, new_quote) = if self_is_newer { + (other, self) + } else { + (self, other) + }; + + if new_quote.quoting_metrics.live_time < old_quote.quoting_metrics.live_time { + info!("Claimed live_time out of sequence"); + return false; + } + + let old_elapsed = if let Ok(elapsed) = old_quote.timestamp.elapsed() { + elapsed + } else { + info!("timestamp failure"); + return false; + }; + let new_elapsed = if let Ok(elapsed) = new_quote.timestamp.elapsed() { + elapsed + } else { + info!("timestamp failure"); + return false; + }; + + let time_diff = old_elapsed.as_secs().saturating_sub(new_elapsed.as_secs()); + let live_time_diff = + new_quote.quoting_metrics.live_time - old_quote.quoting_metrics.live_time; + // In theory, these two shall match, give it a LIVE_TIME_MARGIN to avoid system glitch + if live_time_diff > time_diff + LIVE_TIME_MARGIN { + info!("claimed live_time out of sync with the timestamp"); + return false; + } + + // There could be pruning to be undertaken, also the close range keeps changing as well. + // Hence `close_records_stored` could be growing or shrinking. + // Currently not to carry out check on it, just logging to observe the trend. 
+ debug!( + "The new quote has {} close records stored, meanwhile old one has {}.", + new_quote.quoting_metrics.close_records_stored, + old_quote.quoting_metrics.close_records_stored + ); + + // TODO: Double check if this applies, as this will prevent a node restart with same ID + if new_quote.quoting_metrics.received_payment_count + < old_quote.quoting_metrics.received_payment_count + { + info!("claimed received_payment_count out of sequence"); + return false; + } + + true + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use libp2p::identity::Keypair; + use std::{thread::sleep, time::Duration}; + + #[test] + fn test_is_newer_than() { + let old_quote = PaymentQuote::zero(); + sleep(Duration::from_millis(100)); + let new_quote = PaymentQuote::zero(); + assert!(new_quote.is_newer_than(&old_quote)); + assert!(!old_quote.is_newer_than(&new_quote)); + } + + #[test] + fn test_is_signed_by_claimed_peer() { + let keypair = Keypair::generate_ed25519(); + let peer_id = keypair.public().to_peer_id(); + + let false_peer = PeerId::random(); + + let mut quote = PaymentQuote::zero(); + let bytes = quote.bytes_for_sig(); + let signature = if let Ok(sig) = keypair.sign(&bytes) { + sig + } else { + panic!("Cannot sign the quote!"); + }; + + // Check failed with both incorrect pub_key and signature + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check failed with correct pub_key but incorrect signature + quote.pub_key = keypair.public().encode_protobuf(); + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check succeed with correct pub_key and signature, + // and failed with incorrect claimed signer (peer) + quote.signature = signature; + assert!(quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check failed with incorrect pub_key but correct 
signature + quote.pub_key = Keypair::generate_ed25519().public().encode_protobuf(); + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + } + + #[test] + fn test_historical_verify() { + let mut old_quote = PaymentQuote::zero(); + sleep(Duration::from_millis(100)); + let mut new_quote = PaymentQuote::zero(); + + // historical_verify will swap quotes to compare based on timeline automatically + assert!(new_quote.historical_verify(&old_quote)); + assert!(old_quote.historical_verify(&new_quote)); + + // Out of sequence received_payment_count shall be detected + old_quote.quoting_metrics.received_payment_count = 10; + new_quote.quoting_metrics.received_payment_count = 9; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Reset to correct one + new_quote.quoting_metrics.received_payment_count = 11; + assert!(new_quote.historical_verify(&old_quote)); + assert!(old_quote.historical_verify(&new_quote)); + + // Out of sequence live_time shall be detected + new_quote.quoting_metrics.live_time = 10; + old_quote.quoting_metrics.live_time = 11; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Out of margin live_time shall be detected + new_quote.quoting_metrics.live_time = 11 + LIVE_TIME_MARGIN + 1; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Reset live_time to be within the margin + new_quote.quoting_metrics.live_time = 11 + LIVE_TIME_MARGIN - 1; + assert!(new_quote.historical_verify(&old_quote)); + assert!(old_quote.historical_verify(&new_quote)); + } +} diff --git a/sn_evm/src/error.rs b/sn_evm/src/error.rs new file mode 100644 index 0000000000..386683b5aa --- /dev/null +++ b/sn_evm/src/error.rs @@ -0,0 +1,33 @@ +// Copyright 2024 MaidSafe.net limited. 
+// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::AttoTokens; +use thiserror::Error; + +/// Specialisation of `std::Result`. +pub type Result = std::result::Result; + +#[allow(clippy::large_enum_variant)] +#[derive(Error, Debug, Clone, PartialEq)] +#[non_exhaustive] +/// Transfer errors +pub enum EvmError { + #[error("Lost precision on the number of coins during parsing.")] + LossOfPrecision, + #[error("The token amount would exceed the maximum value")] + ExcessiveValue, + #[error("Failed to parse: {0}")] + FailedToParseAttoToken(String), + #[error("Overflow occurred while adding values")] + NumericOverflow, + #[error("Not enough balance, {0} available, {1} required")] + NotEnoughBalance(AttoTokens, AttoTokens), + + #[error("Invalid quote public key")] + InvalidQuotePublicKey, +} diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs new file mode 100644 index 0000000000..a62fa5c0fd --- /dev/null +++ b/sn_evm/src/lib.rs @@ -0,0 +1,31 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +#[macro_use] +extern crate tracing; + +pub use evmlib::common::Address as RewardsAddress; +pub use evmlib::common::QuotePayment; +pub use evmlib::common::{QuoteHash, TxHash}; +pub use evmlib::utils; +pub use evmlib::utils::get_evm_network_from_env; +pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; +pub use evmlib::wallet::Error as EvmWalletError; +pub use evmlib::wallet::Wallet as EvmWallet; +pub use evmlib::CustomNetwork; +pub use evmlib::Network as EvmNetwork; + +mod amount; +mod data_payments; +mod error; + +pub use data_payments::{PaymentQuote, ProofOfPayment, QuotingMetrics}; + +/// Types used in the public API +pub use amount::{Amount, AttoTokens}; +pub use error::{EvmError, Result}; diff --git a/sn_faucet/CHANGELOG.md b/sn_faucet/CHANGELOG.md deleted file mode 100644 index e8b9817648..0000000000 --- a/sn_faucet/CHANGELOG.md +++ /dev/null @@ -1,1355 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [Unreleased] - -## [0.4.27](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.26...sn_faucet-v0.4.27) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(release)* sn_auditor-v0.1.22/sn_faucet-v0.4.24/node-launchpad-v0.3.4 - -## [0.4.26](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.25...sn_faucet-v0.4.26) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.25](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.24...sn_faucet-v0.4.25) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_cli - -## [0.4.24](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.23...sn_faucet-v0.4.24) - 2024-06-04 - -### Other -- remove gifting and start initial data uploads - -## [0.4.23](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.22...sn_faucet-v0.4.23) - 2024-06-04 - -### Added -- *(faucet_server)* download and upload gutenberger book part by part - -## [0.4.22](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.21...sn_faucet-v0.4.22) - 2024-06-03 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.20](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.19...sn_faucet-v0.4.20) - 2024-06-03 - -### Added -- *(faucet_server)* upload sample files and print head_addresses -- *(faucet_server)* download some iso files during startup - -### Other -- no openssl dep for faucet - -## [0.4.19](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.18...sn_faucet-v0.4.19) - 2024-05-24 - -### Added -- *(faucet)* allow gifting by default -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* 
send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- faucet donate endpoint to feed the faucet -- *(faucet)* fully limit any concurrency -- *(faucet)* log from sn_client -- report protocol mismatch error - -### Fixed -- *(faucet)* cleanup unused vars -- *(faucet)* rate limit before getting wallet -- *(faucet)* ensure faucet is funded in main fn -- update calls to HotWallet::load -- *(faucet)* fix distribution 'from' wallet loading -- *(client)* move acct_packet mnemonic into client layer - -### Other -- enable default features during faucet release -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6 -- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5 -- *(versions)* sync versions with latest crates.io vs -- addres review comments -- *(faucet)* log initilization failure and upload faucet log -- *(CI)* upload faucet log during CI -- *(release)* 
sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(deps)* bump dependencies - -## [0.4.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.17...sn_faucet-v0.4.18) - 2024-05-20 - -### Other -- update Cargo.lock dependencies - -## [0.4.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.16...sn_faucet-v0.4.17) - 2024-05-15 - -### Other -- update Cargo.lock dependencies - -## [0.4.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.15...sn_faucet-v0.4.16) - 2024-05-09 - -### Other -- updated the following local packages: sn_client - -## [0.4.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14...sn_faucet-v0.4.15) - 2024-05-08 - -### Other -- update Cargo.lock dependencies - -## [0.4.14-alpha.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14-alpha.0...sn_faucet-v0.4.14-alpha.1) - 2024-05-07 - -### Added -- faucet donate endpoint to feed the faucet -- *(faucet)* fully limit any concurrency -- *(faucet)* log from sn_client -- report protocol mismatch error - -### Fixed -- *(faucet)* cleanup unused vars -- *(faucet)* rate limit before getting wallet -- *(faucet)* ensure faucet is funded in main fn -- update calls to HotWallet::load -- *(faucet)* fix distribution 'from' wallet loading -- *(client)* move acct_packet mnemonic into client layer - -### Other -- *(versions)* sync versions with latest crates.io vs -- addres review comments -- *(faucet)* log initilization failure and upload faucet log -- *(CI)* upload faucet log during CI -- *(deps)* bump dependencies - -## [0.4.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.2...sn_faucet-v0.4.3) - 2024-03-28 - -### Other -- updated the following local 
packages: sn_client - -## [0.4.2](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.1...sn_faucet-v0.4.2) - 2024-03-28 - -### Fixed -- *(faucet)* bind to wan - -## [0.4.1](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.0...sn_faucet-v0.4.1) - 2024-03-28 - -### Fixed -- *(faucet)* add build info) - -## [0.4.0](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.85...sn_faucet-v0.4.0) - 2024-03-27 - -### Added -- *(faucet)* rate limit based upon wallet locks -- *(faucet)* start using warp for simpler server tweaks -- only give out 1snt per req -- make logging simpler to use -- [**breaking**] remove gossip code - -## [0.3.85](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.84...sn_faucet-v0.3.85) - 2024-03-21 - -### Added -- *(log)* set log levels on the fly - -### Other -- *(release)* sn_cli-v0.89.84/sn_node-v0.104.40/sn_networking-v0.13.34/sn_service_management-v0.1.1/sn_client-v0.104.30 - -## [0.3.84](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.83...sn_faucet-v0.3.84) - 2024-03-14 - -### Other -- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82 - -## [0.3.83](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.82-alpha.0...sn_faucet-v0.3.83) - 2024-03-08 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.81](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.80...sn_faucet-v0.3.81) - 2024-03-06 - -### Added -- provide `faucet add` command -- *(faucet)* claim using signature of safe wallet - -### Other -- *(release)* sn_transfers-v0.16.1 - -## [0.3.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.79...sn_faucet-v0.3.80) - 2024-02-23 - -### Other -- update Cargo.lock dependencies - -## [0.3.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.78...sn_faucet-v0.3.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## 
[0.3.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.77...sn_faucet-v0.3.78) - 2024-02-20 - -### Other -- *(release)* sn_protocol-v0.14.6/sn_node-v0.104.33/sn-node-manager-v0.3.9/sn_cli-v0.89.78/sn_client-v0.104.25/sn_networking-v0.13.27/sn_node_rpc_client-v0.4.64 - -## [0.3.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.76...sn_faucet-v0.3.77) - 2024-02-20 - -### Other -- fix distribution test check - -## [0.3.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.75...sn_faucet-v0.3.76) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.74...sn_faucet-v0.3.75) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.3.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.73...sn_faucet-v0.3.74) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.72...sn_faucet-v0.3.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.71...sn_faucet-v0.3.72) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.70...sn_faucet-v0.3.71) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.3.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.69...sn_faucet-v0.3.70) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## [0.3.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.68...sn_faucet-v0.3.69) - 2024-02-19 - -### Other -- 
token_distribution against network - -## [0.3.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.67...sn_faucet-v0.3.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.66...sn_faucet-v0.3.67) - 2024-02-15 - -### Other -- *(release)* sn_protocol-v0.14.1/sn-node-manager-v0.3.1/sn_cli-v0.89.68/sn_client-v0.104.13/sn_networking-v0.13.18/sn_node-v0.104.21/sn_node_rpc_client-v0.4.54 - -## [0.3.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.65...sn_faucet-v0.3.66) - 2024-02-15 - -### Other -- token_distribution -- *(release)* sn_protocol-v0.14.0/sn-node-manager-v0.3.0/sn_cli-v0.89.67/sn_client-v0.104.12/sn_networking-v0.13.17/sn_node-v0.104.20/sn_node_rpc_client-v0.4.53 - -## [0.3.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.64...sn_faucet-v0.3.65) - 2024-02-14 - -### Other -- *(release)* sn_protocol-v0.13.0/sn-node-manager-v0.2.0/sn_cli-v0.89.65/sn_client-v0.104.10/sn_networking-v0.13.15/sn_node-v0.104.18/sn_node_rpc_client-v0.4.51 - -## [0.3.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.63...sn_faucet-v0.3.64) - 2024-02-13 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.62...sn_faucet-v0.3.63) - 2024-02-12 - -### Other -- *(faucet)* improve faucet server response for clippy -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.3.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.61...sn_faucet-v0.3.62) - 2024-02-12 - -### Other -- updated the following local packages: sn_client - -## [0.3.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.60...sn_faucet-v0.3.61) - 2024-02-12 - -### Added -- *(faucet)* api endpoint to return distribution - -## 
[0.3.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.59...sn_faucet-v0.3.60) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.3.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.58...sn_faucet-v0.3.59) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.3.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.57...sn_faucet-v0.3.58) - 2024-02-09 - -### Other -- update dependencies - -## [0.3.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.56...sn_faucet-v0.3.57) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.3.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.55...sn_faucet-v0.3.56) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.54...sn_faucet-v0.3.55) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.53...sn_faucet-v0.3.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.52...sn_faucet-v0.3.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.51...sn_faucet-v0.3.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.50...sn_faucet-v0.3.51) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.49...sn_faucet-v0.3.50) - 2024-02-07 - -### Other -- update dependencies - -## [0.3.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.48...sn_faucet-v0.3.49) - 2024-02-07 - -### Other -- update dependencies - -## [0.3.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.47...sn_faucet-v0.3.48) - 
2024-02-06 - -### Other -- update dependencies - -## [0.3.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.46...sn_faucet-v0.3.47) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.45...sn_faucet-v0.3.46) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.44...sn_faucet-v0.3.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.43...sn_faucet-v0.3.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.42...sn_faucet-v0.3.43) - 2024-02-05 - -### Added -- *(faucet)* initial distributions in background -- *(faucet)* create distributions for maid addrs - -### Other -- *(ci)* make deps optional if used only inside a feature -- *(faucet)* fix typo/clippy/fmt after rebase - -## [0.3.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.41...sn_faucet-v0.3.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.40...sn_faucet-v0.3.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.39...sn_faucet-v0.3.40) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.38...sn_faucet-v0.3.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.37...sn_faucet-v0.3.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.36...sn_faucet-v0.3.37) - 2024-02-02 - -### Added -- make token distribution an option - -### Fixed -- minreq as optional dep - -## 
[0.3.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.35...sn_faucet-v0.3.36) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.34...sn_faucet-v0.3.35) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.33...sn_faucet-v0.3.34) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.32...sn_faucet-v0.3.33) - 2024-01-31 - -### Other -- remove the `sn_testnet` crate - -## [0.3.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.31...sn_faucet-v0.3.32) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.30...sn_faucet-v0.3.31) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.29...sn_faucet-v0.3.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.28...sn_faucet-v0.3.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.27...sn_faucet-v0.3.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.26...sn_faucet-v0.3.27) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.25...sn_faucet-v0.3.26) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.24...sn_faucet-v0.3.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.23...sn_faucet-v0.3.24) - 2024-01-29 - -### Added -- *(faucet)* add Snapshot type -- 
*(faucet)* get pubkeys from repo not pastebin -- *(faucet)* custom types for maid values -- *(faucet)* load public keys for distribution -- *(faucet)* snapshot is a hashmap - -## [0.3.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.22...sn_faucet-v0.3.23) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.21...sn_faucet-v0.3.22) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.20...sn_faucet-v0.3.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.3.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.19...sn_faucet-v0.3.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.18...sn_faucet-v0.3.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.17...sn_faucet-v0.3.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.16...sn_faucet-v0.3.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.15...sn_faucet-v0.3.16) - 2024-01-25 - -### Added -- client webtransport-websys feat - -## [0.3.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.14...sn_faucet-v0.3.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.13...sn_faucet-v0.3.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.12...sn_faucet-v0.3.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.11...sn_faucet-v0.3.12) - 2024-01-24 - -### Other -- update 
dependencies - -## [0.3.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.10...sn_faucet-v0.3.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.9...sn_faucet-v0.3.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.8...sn_faucet-v0.3.9) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.7...sn_faucet-v0.3.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.6...sn_faucet-v0.3.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.5...sn_faucet-v0.3.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.3.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.4...sn_faucet-v0.3.5) - 2024-01-18 - -### Other -- update dependencies - -## [0.3.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.3...sn_faucet-v0.3.4) - 2024-01-18 - -### Other -- update dependencies - -## [0.3.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.2...sn_faucet-v0.3.3) - 2024-01-18 - -### Added -- set quic as default transport - -## [0.3.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.1...sn_faucet-v0.3.2) - 2024-01-18 - -### Added -- *(faucet)* download snapshot of maid balances - -## [0.3.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.0...sn_faucet-v0.3.1) - 2024-01-17 - -### Other -- update dependencies - -## [0.3.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.22...sn_faucet-v0.3.0) - 2024-01-17 - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.2.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.21...sn_faucet-v0.2.22) - 
2024-01-17 - -### Other -- update dependencies - -## [0.2.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.20...sn_faucet-v0.2.21) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.19...sn_faucet-v0.2.20) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.18...sn_faucet-v0.2.19) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.17...sn_faucet-v0.2.18) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.16...sn_faucet-v0.2.17) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.15...sn_faucet-v0.2.16) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.14...sn_faucet-v0.2.15) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.13...sn_faucet-v0.2.14) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.12...sn_faucet-v0.2.13) - 2024-01-12 - -### Other -- update dependencies - -## [0.2.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.11...sn_faucet-v0.2.12) - 2024-01-12 - -### Other -- update dependencies - -## [0.2.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.10...sn_faucet-v0.2.11) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.9...sn_faucet-v0.2.10) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.8...sn_faucet-v0.2.9) - 2024-01-11 - -### Other -- 
update dependencies - -## [0.2.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.7...sn_faucet-v0.2.8) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.6...sn_faucet-v0.2.7) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.5...sn_faucet-v0.2.6) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.4...sn_faucet-v0.2.5) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.3...sn_faucet-v0.2.4) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.2...sn_faucet-v0.2.3) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.1...sn_faucet-v0.2.2) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.0...sn_faucet-v0.2.1) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.126...sn_faucet-v0.2.0) - 2024-01-08 - -### Added -- provide `--first` argument for `safenode` - -## [0.1.126](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.125...sn_faucet-v0.1.126) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.125](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.124...sn_faucet-v0.1.125) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.124](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.123...sn_faucet-v0.1.124) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.123](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.122...sn_faucet-v0.1.123) - 2024-01-08 - -### Other -- update 
dependencies - -## [0.1.122](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.121...sn_faucet-v0.1.122) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.121](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.120...sn_faucet-v0.1.121) - 2024-01-06 - -### Other -- update dependencies - -## [0.1.120](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.119...sn_faucet-v0.1.120) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.119](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.118...sn_faucet-v0.1.119) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.118](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.117...sn_faucet-v0.1.118) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.117](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.116...sn_faucet-v0.1.117) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.116](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.115...sn_faucet-v0.1.116) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.115](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.114...sn_faucet-v0.1.115) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.114](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.113...sn_faucet-v0.1.114) - 2024-01-04 - -### Other -- update dependencies - -## [0.1.113](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.112...sn_faucet-v0.1.113) - 2024-01-04 - -### Other -- update dependencies - -## [0.1.112](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.111...sn_faucet-v0.1.112) - 2024-01-03 - -### Other -- update dependencies - -## [0.1.111](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.110...sn_faucet-v0.1.111) - 2024-01-03 - -### Other -- update dependencies - -## [0.1.110](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.109...sn_faucet-v0.1.110) - 2024-01-03 - -### 
Other -- update dependencies - -## [0.1.109](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.108...sn_faucet-v0.1.109) - 2024-01-02 - -### Other -- update dependencies - -## [0.1.108](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.107...sn_faucet-v0.1.108) - 2024-01-02 - -### Other -- update dependencies - -## [0.1.107](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.106...sn_faucet-v0.1.107) - 2023-12-29 - -### Added -- restart faucet_server from breaking point - -## [0.1.106](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.105...sn_faucet-v0.1.106) - 2023-12-29 - -### Other -- update dependencies - -## [0.1.105](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.104...sn_faucet-v0.1.105) - 2023-12-29 - -### Other -- update dependencies - -## [0.1.104](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.103...sn_faucet-v0.1.104) - 2023-12-26 - -### Other -- update dependencies - -## [0.1.103](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.102...sn_faucet-v0.1.103) - 2023-12-22 - -### Other -- update dependencies - -## [0.1.102](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.101...sn_faucet-v0.1.102) - 2023-12-22 - -### Other -- update dependencies - -## [0.1.101](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.100...sn_faucet-v0.1.101) - 2023-12-21 - -### Other -- update dependencies - -## [0.1.100](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.99...sn_faucet-v0.1.100) - 2023-12-21 - -### Other -- update dependencies - -## [0.1.99](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.98...sn_faucet-v0.1.99) - 2023-12-20 - -### Other -- update dependencies - -## [0.1.98](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.97...sn_faucet-v0.1.98) - 2023-12-19 - -### Other -- update dependencies - -## 
[0.1.97](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.96...sn_faucet-v0.1.97) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.96](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.95...sn_faucet-v0.1.96) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.95](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.94...sn_faucet-v0.1.95) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.94](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.93...sn_faucet-v0.1.94) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.93](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.92...sn_faucet-v0.1.93) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.92](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.91...sn_faucet-v0.1.92) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.91](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.90...sn_faucet-v0.1.91) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.90](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.89...sn_faucet-v0.1.90) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.89](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.88...sn_faucet-v0.1.89) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.88](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.87...sn_faucet-v0.1.88) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.87](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.86...sn_faucet-v0.1.87) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.86](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.85...sn_faucet-v0.1.86) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.85](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.84...sn_faucet-v0.1.85) - 2023-12-14 - -### Other -- update dependencies - -## 
[0.1.84](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.83...sn_faucet-v0.1.84) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.83](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.82...sn_faucet-v0.1.83) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.82](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.81...sn_faucet-v0.1.82) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.81](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.80...sn_faucet-v0.1.81) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.79...sn_faucet-v0.1.80) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.78...sn_faucet-v0.1.79) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.77...sn_faucet-v0.1.78) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.76...sn_faucet-v0.1.77) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.75...sn_faucet-v0.1.76) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.74...sn_faucet-v0.1.75) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.73...sn_faucet-v0.1.74) - 2023-12-11 - -### Other -- update dependencies - -## [0.1.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.72...sn_faucet-v0.1.73) - 2023-12-11 - -### Other -- update dependencies - -## [0.1.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.71...sn_faucet-v0.1.72) - 2023-12-08 - -### Other -- update dependencies - -## 
[0.1.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.70...sn_faucet-v0.1.71) - 2023-12-08 - -### Other -- update dependencies - -## [0.1.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.69...sn_faucet-v0.1.70) - 2023-12-08 - -### Other -- update dependencies - -## [0.1.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.68...sn_faucet-v0.1.69) - 2023-12-07 - -### Other -- update dependencies - -## [0.1.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.67...sn_faucet-v0.1.68) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.66...sn_faucet-v0.1.67) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.65...sn_faucet-v0.1.66) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.64...sn_faucet-v0.1.65) - 2023-12-06 - -### Other -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.1.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.63...sn_faucet-v0.1.64) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.62...sn_faucet-v0.1.63) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.61...sn_faucet-v0.1.62) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.60...sn_faucet-v0.1.61) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.59...sn_faucet-v0.1.60) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.58...sn_faucet-v0.1.59) - 
2023-12-05 - -### Other -- update dependencies - -## [0.1.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.57...sn_faucet-v0.1.58) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.56...sn_faucet-v0.1.57) - 2023-12-04 - -### Added -- *(testnet)* wait till faucet server starts - -### Other -- *(faucet)* print on claim genesis error - -## [0.1.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.55...sn_faucet-v0.1.56) - 2023-12-01 - -### Other -- update dependencies - -## [0.1.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.54...sn_faucet-v0.1.55) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.53...sn_faucet-v0.1.54) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.52...sn_faucet-v0.1.53) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.51...sn_faucet-v0.1.52) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.50...sn_faucet-v0.1.51) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.49...sn_faucet-v0.1.50) - 2023-11-29 - -### Added -- add missing quic features - -## [0.1.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.48...sn_faucet-v0.1.49) - 2023-11-29 - -### Added -- verify spends through the cli - -## [0.1.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.47...sn_faucet-v0.1.48) - 2023-11-28 - -### Other -- update dependencies - -## [0.1.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.46...sn_faucet-v0.1.47) - 2023-11-28 - -### Other -- update dependencies - -## 
[0.1.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.45...sn_faucet-v0.1.46) - 2023-11-28 - -### Other -- update dependencies - -## [0.1.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.44...sn_faucet-v0.1.45) - 2023-11-27 - -### Other -- update dependencies - -## [0.1.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.43...sn_faucet-v0.1.44) - 2023-11-24 - -### Other -- update dependencies - -## [0.1.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.42...sn_faucet-v0.1.43) - 2023-11-24 - -### Other -- update dependencies - -## [0.1.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.41...sn_faucet-v0.1.42) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.40...sn_faucet-v0.1.41) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.39...sn_faucet-v0.1.40) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.38...sn_faucet-v0.1.39) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.37...sn_faucet-v0.1.38) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.36...sn_faucet-v0.1.37) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.35...sn_faucet-v0.1.36) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.34...sn_faucet-v0.1.35) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - -## [0.1.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.33...sn_faucet-v0.1.34) - 2023-11-21 - -### Other -- update 
dependencies - -## [0.1.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.32...sn_faucet-v0.1.33) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.31...sn_faucet-v0.1.32) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.30...sn_faucet-v0.1.31) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.29...sn_faucet-v0.1.30) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.28...sn_faucet-v0.1.29) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.27...sn_faucet-v0.1.28) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.26...sn_faucet-v0.1.27) - 2023-11-17 - -### Other -- update dependencies - -## [0.1.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.25...sn_faucet-v0.1.26) - 2023-11-17 - -### Other -- update dependencies - -## [0.1.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.24...sn_faucet-v0.1.25) - 2023-11-16 - -### Other -- update dependencies - -## [0.1.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.23...sn_faucet-v0.1.24) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.1.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.22...sn_faucet-v0.1.23) - 2023-11-15 - -### Other -- update dependencies - -## [0.1.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.21...sn_faucet-v0.1.22) - 2023-11-15 - -### Other -- update dependencies - -## [0.1.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.20...sn_faucet-v0.1.21) - 2023-11-15 - -### Other -- update 
dependencies - -## [0.1.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.19...sn_faucet-v0.1.20) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.18...sn_faucet-v0.1.19) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.17...sn_faucet-v0.1.18) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.16...sn_faucet-v0.1.17) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.15...sn_faucet-v0.1.16) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.14...sn_faucet-v0.1.15) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.13...sn_faucet-v0.1.14) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.12...sn_faucet-v0.1.13) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.11...sn_faucet-v0.1.12) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.10...sn_faucet-v0.1.11) - 2023-11-10 - -### Other -- update dependencies - -## [0.1.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.9...sn_faucet-v0.1.10) - 2023-11-10 - -### Other -- update dependencies - -## [0.1.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.8...sn_faucet-v0.1.9) - 2023-11-09 - -### Other -- update dependencies - -## [0.1.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.7...sn_faucet-v0.1.8) - 2023-11-09 - -### Other -- update dependencies - -## 
[0.1.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.6...sn_faucet-v0.1.7) - 2023-11-09 - -### Other -- update dependencies - -## [0.1.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.5...sn_faucet-v0.1.6) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.4...sn_faucet-v0.1.5) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.3...sn_faucet-v0.1.4) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.2...sn_faucet-v0.1.3) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.1...sn_faucet-v0.1.2) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.0...sn_faucet-v0.1.1) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.0](https://github.com/maidsafe/safe_network/releases/tag/sn_faucet-v0.1.0) - 2023-11-07 - -### Fixed -- CI errors - -### Other -- move sn_faucet to its own crate diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml deleted file mode 100644 index 9a2d3e7d03..0000000000 --- a/sn_faucet/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "The Safe Network Faucet" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_faucet" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.5.3" - -[features] -default = ["gifting"] -distribution = ["base64", "bitcoin", "minreq"] -gifting = [] -initial-data = ["reqwest", "futures"] - -[[bin]] -path = "src/main.rs" -name = "faucet" - -[dependencies] -warp = "0.3" -assert_fs = "1.0.0" -base64 = { version = "0.22.0", optional = true } 
-bitcoin = { version = "0.31.0", features = [ - "rand-std", - "base64", -], optional = true } -bls = { package = "blsttc", version = "8.0.1" } -clap = { version = "4.2.1", features = ["derive"] } -color-eyre = "0.6.2" -dirs-next = "~2.0.0" -hex = "0.4.3" -indicatif = { version = "0.17.5", features = ["tokio"] } -minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } -serde = { version = "1.0.193", features = ["derive"] } -serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_cli = { path = "../sn_cli", version = "0.95.3" } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } -tracing = { version = "~0.1.26" } -url = "2.5.0" -fs2 = "0.4.3" -reqwest = { version = "0.12.4", default-features = false, features = [ - "rustls-tls", -], optional = true } -futures = { version = "0.3.30", optional = true } - -[lints] -workspace = true diff --git a/sn_faucet/README.md b/sn_faucet/README.md deleted file mode 100644 index 041edc921d..0000000000 --- a/sn_faucet/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Safe Network Faucet -This is a command line application that allows you to run a Safe Network Faucet. - -## Usage -Run `cargo run -- ` to start the application. Some of the commands available are: - -- `ClaimGenesis`: Claim the amount in the genesis CashNote and deposit it to the faucet local wallet. -- `Send`: Send a specified amount of tokens to a specified wallet. -- `Server`: Starts an http server that will send tokens to anyone who requests them. - -For more information about each command, run `cargo run -- --help`. 
diff --git a/sn_faucet/maid_address_claims.csv b/sn_faucet/maid_address_claims.csv deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sn_faucet/src/faucet_server.rs b/sn_faucet/src/faucet_server.rs deleted file mode 100644 index ddd11d2f1f..0000000000 --- a/sn_faucet/src/faucet_server.rs +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::claim_genesis; -#[cfg(feature = "gifting")] -use crate::send_tokens; -#[cfg(feature = "distribution")] -use crate::token_distribution; -use color_eyre::eyre::Result; -use fs2::FileExt; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet, - Client, -}; -use sn_transfers::{ - get_faucet_data_dir, wallet_lockfile_name, NanoTokens, Transfer, WALLET_DIR_NAME, -}; -use std::path::Path; -use std::{collections::HashMap, sync::Arc}; -use tokio::sync::Semaphore; -use tracing::{debug, error, info, warn}; -use warp::{ - http::{Response, StatusCode}, - Filter, Reply, -}; - -#[cfg(feature = "initial-data")] -use crate::gutenberger::{download_book, State}; -#[cfg(feature = "initial-data")] -use reqwest::Client as ReqwestClient; -#[cfg(feature = "initial-data")] -use sn_cli::FilesUploader; -#[cfg(feature = "initial-data")] -use sn_client::{UploadCfg, BATCH_SIZE}; -#[cfg(feature = "initial-data")] -use sn_protocol::storage::{ChunkAddress, RetryStrategy}; -#[cfg(feature = "initial-data")] -use std::{fs::File, path::PathBuf}; -#[cfg(feature = "initial-data")] -use 
tokio::{fs, io::AsyncWriteExt}; - -/// Run the faucet server. -/// -/// This will listen on port 8000 and send a transfer of tokens as response to any GET request. -/// -/// # Example -/// -/// ```bash -/// # run faucet server -/// cargo run --features="local-discovery" --bin faucet --release -- server -/// -/// # query faucet server for money for our address `get local wallet address` -/// curl "localhost:8000/`cargo run --features="local-discovery" --bin safe --release wallet address | tail -n 1`" > transfer_hex -/// -/// # receive transfer with our wallet -/// cargo run --features="local-discovery" --bin safe --release wallet receive --file transfer_hex -/// -/// # balance should be updated -/// ``` -pub async fn run_faucet_server(client: &Client) -> Result<()> { - let root_dir = get_faucet_data_dir(); - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)?; - claim_genesis(client, wallet).await.inspect_err(|_err| { - println!("Faucet Server couldn't start as we failed to claim Genesis"); - eprintln!("Faucet Server couldn't start as we failed to claim Genesis"); - error!("Faucet Server couldn't start as we failed to claim Genesis"); - })?; - - #[cfg(feature = "initial-data")] - { - let _ = upload_initial_data(client, &root_dir).await; - } - - startup_server(client.clone()).await -} - -#[cfg(feature = "initial-data")] -/// Trigger one by one uploading of intitial data packets to the entwork. -async fn upload_initial_data(client: &Client, root_dir: &Path) -> Result<()> { - let temp_dir = std::env::temp_dir(); - let state_file = temp_dir.join("state.json"); - let uploaded_books_file = temp_dir.join("uploaded_books.json"); - let mut state = State::load_from_file(&state_file)?; - - let reqwest_client = ReqwestClient::new(); - - let mut uploaded_books: Vec<(String, String)> = if uploaded_books_file.exists() { - let file = File::open(&uploaded_books_file)?; - serde_json::from_reader(file)? 
- } else { - vec![] - }; - - println!("Previous upload state restored"); - info!("Previous upload state restored"); - - for book_id in state.max_seen()..u16::MAX as u32 { - if state.has_seen(book_id) { - println!("Already seen book ID: {book_id}"); - info!("Already seen book ID: {book_id}"); - continue; - } - - match download_book(&reqwest_client, book_id).await { - Ok(data) => { - println!("Downloaded book ID: {book_id}"); - info!("Downloaded book ID: {book_id}"); - - let fname = format!("{book_id}.book"); - let fpath = temp_dir.join(fname.clone()); - - match mark_download_progress(book_id, &fpath, data, &mut state, &state_file).await { - Ok(_) => { - println!("Marked download progress book ID: {book_id} completed"); - info!("Marked download progress book ID: {book_id} completed"); - } - Err(err) => { - println!("When marking download progress book ID: {book_id}, encountered error {err:?}"); - error!("When marking download progress book ID: {book_id}, encountered error {err:?}"); - continue; - } - } - - match upload_downloaded_book(client, root_dir, fpath).await { - Ok(head_addresses) => { - println!("Uploaded book ID: {book_id}"); - info!("Uploaded book ID: {book_id}"); - - // There shall be just one - for head_address in head_addresses { - uploaded_books.push((fname.clone(), head_address.to_hex())); - - match mark_upload_progress(&uploaded_books_file, &uploaded_books) { - Ok(_) => { - println!("Marked upload progress book ID: {book_id} completed"); - info!("Marked upload progress book ID: {book_id} completed"); - } - Err(err) => { - println!("When marking upload progress book ID: {book_id}, encountered error {err:?}"); - error!("When marking upload progress book ID: {book_id}, encountered error {err:?}"); - continue; - } - } - } - } - Err(err) => { - println!("Failed to upload book ID: {book_id} with error {err:?}"); - info!("Failed to upload book ID: {book_id} with error {err:?}"); - } - } - - println!("Sleeping for 1 minutes..."); - 
tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; - } - Err(e) => { - eprintln!("Failed to download book ID {book_id}: {e:?}"); - } - } - } - - Ok(()) -} - -#[cfg(feature = "initial-data")] -async fn mark_download_progress( - book_id: u32, - fpath: &Path, - data: Vec, - state: &mut State, - state_file: &Path, -) -> Result<()> { - let mut dest = fs::File::create(fpath).await?; - dest.write_all(&data).await?; - - state.mark_seen(book_id); - state.save_to_file(state_file)?; - Ok(()) -} - -#[cfg(feature = "initial-data")] -fn mark_upload_progress(fpath: &Path, uploaded_books: &Vec<(String, String)>) -> Result<()> { - let file = File::create(fpath)?; - serde_json::to_writer(file, &uploaded_books)?; - Ok(()) -} - -#[cfg(feature = "initial-data")] -async fn upload_downloaded_book( - client: &Client, - root_dir: &Path, - file_path: PathBuf, -) -> Result> { - let upload_cfg = UploadCfg { - batch_size: BATCH_SIZE, - verify_store: true, - retry_strategy: RetryStrategy::Quick, - ..Default::default() - }; - - let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf()) - .set_make_data_public(true) - .set_upload_cfg(upload_cfg) - .insert_path(&file_path); - - let summary = match files_uploader.start_upload().await { - Ok(summary) => summary, - Err(err) => { - println!("Failed to upload {file_path:?} with error {err:?}"); - return Ok(vec![]); - } - }; - - info!( - "File {file_path:?} uploaded completed with summary {:?}", - summary.upload_summary - ); - println!( - "File {file_path:?} uploaded completed with summary {:?}", - summary.upload_summary - ); - - let mut head_addresses = vec![]; - for (_, file_name, head_address) in summary.completed_files.iter() { - info!( - "Head address of {file_name:?} is {:?}", - head_address.to_hex() - ); - println!( - "Head address of {file_name:?} is {:?}", - head_address.to_hex() - ); - head_addresses.push(*head_address); - } - - Ok(head_addresses) -} - -pub async fn restart_faucet_server(client: &Client) -> 
Result<()> { - let root_dir = get_faucet_data_dir(); - println!("Loading the previous wallet at {root_dir:?}"); - debug!("Loading the previous wallet at {root_dir:?}"); - - deposit(&root_dir)?; - - println!("Previous wallet loaded"); - debug!("Previous wallet loaded"); - - startup_server(client.clone()).await -} - -#[cfg(feature = "distribution")] -async fn respond_to_distribution_request( - client: Client, - query: HashMap, - balances: HashMap, - semaphore: Arc, -) -> std::result::Result { - let permit = semaphore.try_acquire(); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due to locked wallet"); - - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - let r = - match token_distribution::handle_distribution_req(&client, query, balances.clone()).await { - Ok(distribution) => Response::new(distribution.to_string()), - Err(err) => { - eprintln!("Failed to get distribution: {err}"); - error!("Failed to get distribution: {err}"); - Response::new(format!("Failed to get distribution: {err}")) - } - }; - - Ok(r) -} - -fn is_wallet_locked() -> bool { - info!("Checking if wallet is locked"); - let root_dir = get_faucet_data_dir(); - - let wallet_dir = root_dir.join(WALLET_DIR_NAME); - let wallet_lockfile_name = wallet_lockfile_name(&wallet_dir); - let file_result = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(wallet_lockfile_name) - .and_then(|file| file.try_lock_exclusive()); - info!("After if wallet is locked"); - - if file_result.is_err() { - // Either opening the file or locking it failed, indicating rate limiting should occur - return true; - } - - false -} - -async fn respond_to_donate_request( - client: Client, - transfer_str: String, - semaphore: Arc, -) -> std::result::Result { - let 
permit = semaphore.try_acquire(); - info!("Got donate request with: {transfer_str}"); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due"); - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - let faucet_root = get_faucet_data_dir(); - let mut wallet = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) { - Ok(wallet) => wallet, - Err(_error) => { - let mut response = Response::new("Could not load wallet".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - }; - - if let Err(err) = fund_faucet_from_genesis_wallet(&client, &mut wallet).await { - eprintln!("Failed to load + fund faucet wallet: {err}"); - error!("Failed to load + fund faucet wallet: {err}"); - let mut response = Response::new(format!("Failed to load faucet wallet: {err}")); - *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; - return Ok(response); - }; - - // return key is Transfer is empty - if transfer_str.is_empty() { - let address = wallet.address().to_hex(); - return Ok(Response::new(format!("Faucet wallet address: {address}"))); - } - - // parse transfer - let transfer = match Transfer::from_hex(&transfer_str) { - Ok(t) => t, - Err(err) => { - eprintln!("Failed to parse transfer: {err}"); - error!("Failed to parse transfer {transfer_str}: {err}"); - let mut response = Response::new(format!("Failed to parse transfer: {err}")); - *response.status_mut() = StatusCode::BAD_REQUEST; - return Ok(response); - } - }; - - // receive transfer - let res = client.receive(&transfer, &wallet).await; - match res { - Ok(cashnotes) => { - let old_balance = wallet.balance(); - if let Err(e) = 
wallet.deposit_and_store_to_disk(&cashnotes) { - eprintln!("Failed to store deposited amount: {e}"); - error!("Failed to store deposited amount: {e}"); - let mut response = Response::new(format!("Failed to store deposited amount: {e}")); - *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; - return Ok(response); - } - let new_balance = wallet.balance(); - - info!("Successfully stored cash_note to wallet dir"); - info!("Old balance: {old_balance}, new balance: {new_balance}"); - - Ok(Response::new("Thank you!".to_string())) - } - Err(err) => { - eprintln!("Failed to verify and redeem transfer: {err}"); - error!("Failed to verify and redeem transfer: {err}"); - let mut response = - Response::new(format!("Failed to verify and redeem transfer: {err}")); - *response.status_mut() = StatusCode::BAD_REQUEST; - Ok(response) - } - } -} - -#[cfg(not(feature = "gifting"))] -#[expect(clippy::unused_async)] -async fn respond_to_gift_request( - _client: Client, - _key: String, - _semaphore: Arc, -) -> std::result::Result { - let mut response = Response::new("Gifting not enabled".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - Ok(response) -} - -#[cfg(feature = "gifting")] -async fn respond_to_gift_request( - client: Client, - key: String, - semaphore: Arc, -) -> std::result::Result { - let faucet_root = get_faucet_data_dir(); - - let from = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) { - Ok(wallet) => wallet, - Err(_error) => { - let mut response = Response::new("Could not load wallet".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - }; - - let permit = semaphore.try_acquire(); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due"); - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = 
StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - const GIFT_AMOUNT_SNT: &str = "1"; - match send_tokens(&client, from, GIFT_AMOUNT_SNT, &key).await { - Ok(transfer) => { - println!("Sent tokens to {key}"); - debug!("Sent tokens to {key}"); - Ok(Response::new(transfer.to_string())) - } - Err(err) => { - eprintln!("Failed to send tokens to {key}: {err}"); - error!("Failed to send tokens to {key}: {err}"); - Ok(Response::new(format!("Failed to send tokens: {err}"))) - } - } -} - -async fn startup_server(client: Client) -> Result<()> { - // Create a semaphore with a single permit - let semaphore = Arc::new(Semaphore::new(1)); - - #[expect(unused)] - let mut balances = HashMap::::new(); - #[cfg(feature = "distribution")] - { - balances = token_distribution::load_maid_snapshot()?; - let keys = token_distribution::load_maid_claims()?; - // Each distribution takes about 500ms to create, so for thousands of - // initial distributions this takes many minutes. This is run in the - // background instead of blocking the server from starting. 
- tokio::spawn(token_distribution::distribute_from_maid_to_tokens( - client.clone(), - balances.clone(), - keys, - )); - } - - let gift_client = client.clone(); - let donation_client = client.clone(); - let donation_addr_client = client.clone(); - let donation_semaphore = Arc::clone(&semaphore); - let donation_addr_semaphore = Arc::clone(&semaphore); - #[cfg(feature = "distribution")] - let semaphore_dist = Arc::clone(&semaphore); - - // GET /distribution/address=address&wallet=wallet&signature=signature - #[cfg(feature = "distribution")] - let distribution_route = warp::get() - .and(warp::path("distribution")) - .and(warp::query::>()) - .map(|query| { - debug!("Received distribution request: {query:?}"); - query - }) - .and_then(move |query| { - let semaphore = Arc::clone(&semaphore_dist); - let client = client.clone(); - respond_to_distribution_request(client, query, balances.clone(), semaphore) - }); - - // GET /key - let gift_route = warp::get() - .and(warp::path!(String)) - .map(|query| { - debug!("Gift distribution request: {query}"); - query - }) - .and_then(move |key| { - let client = gift_client.clone(); - let semaphore = Arc::clone(&semaphore); - - respond_to_gift_request(client, key, semaphore) - }); - - // GET /donate - let donation_addr = warp::get().and(warp::path("donate")).and_then(move || { - debug!("Donation address request"); - let client = donation_addr_client.clone(); - let semaphore = Arc::clone(&donation_addr_semaphore); - - respond_to_donate_request(client, String::new(), semaphore) - }); - - // GET /donate/transfer - let donation_route = warp::get() - .and(warp::path!("donate" / String)) - .map(|query| { - debug!("Donation request: {query}"); - query - }) - .and_then(move |transfer| { - let client = donation_client.clone(); - let semaphore = Arc::clone(&donation_semaphore); - - respond_to_donate_request(client, transfer, semaphore) - }); - - println!("Starting http server listening on port 8000..."); - debug!("Starting http server listening 
on port 8000..."); - - #[cfg(feature = "distribution")] - warp::serve( - distribution_route - .or(donation_route) - .or(donation_addr) - .or(gift_route), - ) - // warp::serve(gift_route) - .run(([0, 0, 0, 0], 8000)) - .await; - - #[cfg(not(feature = "distribution"))] - warp::serve(donation_route.or(donation_addr).or(gift_route)) - .run(([0, 0, 0, 0], 8000)) - .await; - - debug!("Server closed"); - Ok(()) -} - -fn deposit(root_dir: &Path) -> Result<()> { - let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let previous_balance = wallet.balance(); - - wallet.try_load_cash_notes()?; - - let deposited = NanoTokens::from(wallet.balance().as_nano() - previous_balance.as_nano()); - if deposited.is_zero() { - println!("Nothing deposited."); - } else if let Err(err) = wallet.deposit_and_store_to_disk(&vec![]) { - println!("Failed to store deposited ({deposited}) amount: {err:?}"); - } else { - println!("Deposited {deposited}."); - } - - Ok(()) -} diff --git a/sn_faucet/src/gutenberger.rs b/sn_faucet/src/gutenberger.rs deleted file mode 100644 index 4968c93cc4..0000000000 --- a/sn_faucet/src/gutenberger.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use color_eyre::eyre::Result; -use reqwest::Client; -use serde::{Deserialize, Serialize}; -use std::collections::HashSet; -use std::fs::File; -use std::path::Path; - -#[derive(Serialize, Deserialize)] -pub(crate) struct State { - seen_books: HashSet, -} - -impl State { - pub(crate) fn new() -> Self { - State { - seen_books: HashSet::new(), - } - } - - pub(crate) fn load_from_file(path: &Path) -> Result { - if path.exists() { - let file = File::open(path)?; - let state: State = serde_json::from_reader(file)?; - Ok(state) - } else { - Ok(Self::new()) - } - } - - pub(crate) fn save_to_file(&self, path: &Path) -> Result<()> { - let file = File::create(path)?; - serde_json::to_writer(file, self)?; - Ok(()) - } - - pub(crate) fn mark_seen(&mut self, book_id: u32) { - self.seen_books.insert(book_id); - } - - pub(crate) fn has_seen(&self, book_id: u32) -> bool { - if book_id == 0 && self.seen_books.is_empty() { - return true; - } - self.seen_books.contains(&book_id) - } - - pub(crate) fn max_seen(&self) -> u32 { - if let Some(result) = self.seen_books.iter().max() { - *result - } else { - 0 - } - } -} - -pub(crate) async fn download_book(client: &Client, book_id: u32) -> Result> { - let url = format!("http://www.gutenberg.org/ebooks/{book_id}.txt.utf-8"); - let response = client.get(&url).send().await?.bytes().await?; - Ok(response.to_vec()) -} diff --git a/sn_faucet/src/main.rs b/sn_faucet/src/main.rs deleted file mode 100644 index 833178a8f9..0000000000 --- a/sn_faucet/src/main.rs +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod faucet_server; -#[cfg(feature = "initial-data")] -pub(crate) mod gutenberger; -#[cfg(feature = "distribution")] -mod token_distribution; - -use clap::{Parser, Subcommand}; -use color_eyre::eyre::{bail, eyre, Result}; -use faucet_server::{restart_faucet_server, run_faucet_server}; -use indicatif::ProgressBar; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet, send, - Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver, -}; -use sn_logging::{Level, LogBuilder, LogOutputDest}; -use sn_peers_acquisition::PeersArgs; -use sn_transfers::{get_faucet_data_dir, HotWallet, MainPubkey, NanoTokens, Transfer}; -use std::{path::PathBuf, time::Duration}; -use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; -use tracing::{debug, error, info}; - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - - let bootstrap_peers = opt.peers.get_peers().await?; - let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided - None - } else { - Some(bootstrap_peers) - }; - - let logging_targets = vec![ - // TODO: Reset to nice and clean defaults once we have a better idea of what we want - ("faucet".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_faucet".to_string(), Level::TRACE), - ("sn_networking".to_string(), Level::DEBUG), - ("sn_build_info".to_string(), Level::TRACE), - ("sn_logging".to_string(), Level::TRACE), - ("sn_peers_acquisition".to_string(), Level::TRACE), - ("sn_protocol".to_string(), Level::TRACE), - ("sn_registers".to_string(), Level::TRACE), - ("sn_transfers".to_string(), Level::TRACE), - ]; - - let mut log_builder = LogBuilder::new(logging_targets); - log_builder.output_dest(opt.log_output_dest); - let _log_handles = 
log_builder.initialize()?; - - debug!( - "faucet built with git version: {}", - sn_build_info::git_info() - ); - println!( - "faucet built with git version: {}", - sn_build_info::git_info() - ); - info!("Instantiating a SAFE Test Faucet..."); - - let secret_key = bls::SecretKey::random(); - let broadcaster = ClientEventsBroadcaster::default(); - let (progress_bar, handle) = spawn_connection_progress_bar(broadcaster.subscribe()); - let result = Client::new(secret_key, bootstrap_peers, None, Some(broadcaster)).await; - let client = match result { - Ok(client) => client, - Err(err) => { - // clean up progress bar - progress_bar.finish_with_message("Could not connect to the network"); - error!("Failed to get Client with err {err:?}"); - return Err(err.into()); - } - }; - handle.await?; - - let root_dir = get_faucet_data_dir(); - let mut funded_faucet = match load_account_wallet_or_create_with_mnemonic(&root_dir, None) { - Ok(wallet) => wallet, - Err(err) => { - println!("failed to load wallet for faucet! with error {err:?}"); - error!("failed to load wallet for faucet! with error {err:?}"); - return Err(err.into()); - } - }; - - fund_faucet_from_genesis_wallet(&client, &mut funded_faucet).await?; - - if let Err(err) = faucet_cmds(opt.cmd.clone(), &client, funded_faucet).await { - error!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd); - eprintln!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd); - } - - Ok(()) -} - -/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the -/// client successfully connects to the network or if it errors out. 
-fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) { - // Network connection progress bar - let progress_bar = ProgressBar::new_spinner(); - let progress_bar_clone = progress_bar.clone(); - progress_bar.enable_steady_tick(Duration::from_millis(120)); - progress_bar.set_message("Connecting to The SAFE Network..."); - let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); - progress_bar.set_style(new_style); - - progress_bar.set_message("Connecting to The SAFE Network..."); - - let handle = tokio::spawn(async move { - let mut peers_connected = 0; - loop { - match rx.recv().await { - Ok(ClientEvent::ConnectedToNetwork) => { - progress_bar.finish_with_message("Connected to the Network"); - break; - } - Ok(ClientEvent::PeerAdded { - max_peers_to_connect, - }) => { - peers_connected += 1; - progress_bar.set_message(format!( - "{peers_connected}/{max_peers_to_connect} initial peers found.", - )); - } - Err(RecvError::Lagged(_)) => { - // Even if the receiver is lagged, we would still get the ConnectedToNetwork during each new - // connection. Thus it would be okay to skip this error. - } - Err(RecvError::Closed) => { - progress_bar.finish_with_message("Could not connect to the network"); - break; - } - _ => {} - } - } - }); - (progress_bar_clone, handle) -} - -#[derive(Parser)] -#[command(author, version, about, long_about = None)] -struct Opt { - /// Specify the logging output destination. - /// - /// Valid values are "stdout", "data-dir", or a custom path. - /// - /// `data-dir` is the default value. 
- /// - /// The data directory location is platform specific: - /// - Linux: $HOME/.local/share/safe/client/logs - /// - macOS: $HOME/Library/Application Support/safe/client/logs - /// - Windows: C:\Users\\AppData\Roaming\safe\client\logs - #[clap(long, value_parser = parse_log_output, verbatim_doc_comment, default_value = "data-dir")] - pub log_output_dest: LogOutputDest, - - #[command(flatten)] - peers: PeersArgs, - - /// Available sub commands. - #[clap(subcommand)] - pub cmd: SubCmd, -} - -#[derive(Subcommand, Debug, Clone)] -enum SubCmd { - /// Claim the amount in the genesis CashNote and deposit it to the faucet local wallet. - /// This needs to be run before a testnet is opened to the public, as to not have - /// the genesis claimed by someone else (the key and cash_note are public for audit). - ClaimGenesis, - Send { - /// This shall be the number of nanos to send. - #[clap(name = "amount")] - amount: String, - /// This must be a hex-encoded `MainPubkey`. - #[clap(name = "to")] - to: String, - }, - /// Starts an http server that will send tokens to anyone who requests them. - /// curl http://localhost:8000/your-hex-encoded-wallet-public-address - Server, - /// Restart the faucet_server from the last breaking point. - /// - /// Before firing this cmd, ensure: - /// 1, The previous faucet_server has been stopped. - /// 2, Invalid cash_notes have been removed from the cash_notes folder. - /// 3, The old `wallet` and `wallet.lock` files shall also be removed. - /// The command will create a new wallet with the same key, - /// then deposit all valid cash_notes into wallet and startup the faucet_server. 
- RestartServer, -} - -async fn faucet_cmds(cmds: SubCmd, client: &Client, funded_wallet: HotWallet) -> Result<()> { - match cmds { - SubCmd::ClaimGenesis => { - claim_genesis(client, funded_wallet).await?; - } - SubCmd::Send { amount, to } => { - send_tokens(client, funded_wallet, &amount, &to).await?; - } - SubCmd::Server => { - // shouldn't return except on error - run_faucet_server(client).await?; - } - SubCmd::RestartServer => { - // shouldn't return except on error - restart_faucet_server(client).await?; - } - } - Ok(()) -} - -async fn claim_genesis(client: &Client, mut wallet: HotWallet) -> Result<()> { - for i in 1..6 { - if let Err(e) = fund_faucet_from_genesis_wallet(client, &mut wallet).await { - println!("Failed to claim genesis: {e}"); - } else { - println!("Genesis claimed!"); - return Ok(()); - } - println!("Trying to claiming genesis... attempt {i}"); - } - bail!("Failed to claim genesis") -} - -/// returns the hex-encoded transfer -async fn send_tokens(client: &Client, from: HotWallet, amount: &str, to: &str) -> Result { - let to = MainPubkey::from_hex(to)?; - use std::str::FromStr; - let amount = NanoTokens::from_str(amount)?; - if amount.as_nano() == 0 { - println!("Invalid format or zero amount passed in. Nothing sent."); - return Err(eyre!( - "Invalid format or zero amount passed in. Nothing sent." - )); - } - - let cash_note = send(from, amount, to, client, true).await?; - let transfer_hex = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?; - println!("{transfer_hex}"); - - Ok(transfer_hex) -} - -fn parse_log_output(val: &str) -> Result { - match val { - "stdout" => Ok(LogOutputDest::Stdout), - "data-dir" => { - let dir = get_faucet_data_dir().join("logs"); - Ok(LogOutputDest::Path(dir)) - } - // The path should be a directory, but we can't use something like `is_dir` to check - // because the path doesn't need to exist. We can create it for the user. 
- value => Ok(LogOutputDest::Path(PathBuf::from(value))), - } -} diff --git a/sn_faucet/src/token_distribution.rs b/sn_faucet/src/token_distribution.rs deleted file mode 100644 index 76e7b46a9f..0000000000 --- a/sn_faucet/src/token_distribution.rs +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::send_tokens; -#[cfg(feature = "distribution")] -use base64::Engine; -use color_eyre::eyre::{eyre, Result}; -use serde::{Deserialize, Serialize}; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::Client; -use sn_transfers::{get_faucet_data_dir, MainPubkey, NanoTokens}; -use std::str::FromStr; -use std::{collections::HashMap, path::PathBuf}; -use tracing::info; - -const SNAPSHOT_FILENAME: &str = "snapshot.json"; -const SNAPSHOT_URL: &str = "https://api.omniexplorer.info/ask.aspx?api=getpropertybalances&prop=3"; -const CLAIMS_URL: &str = - "https://github.com/maidsafe/safe_network/raw/main/sn_faucet/maid_address_claims.csv"; -const HTTP_STATUS_OK: i32 = 200; - -type MaidAddress = String; // base58 encoded -type Snapshot = HashMap; - -// Parsed from json in SNAPSHOT_URL -#[derive(Serialize, Deserialize)] -struct MaidBalance { - address: MaidAddress, - balance: String, - reserved: String, -} - -// Maid owners supply info that allows the faucet to distribute their funds. -// They sign a safe wallet address using their maid key to prove ownership of -// the maid. 
-// The faucet will distribute SNT directly to that safe wallet address. -pub struct MaidClaim { - address: String, // base58 encoded bitcoin address owning omni maid - pubkey: String, // hex encoded bitcoin public key - wallet: String, // hex encoded safe wallet address - signature: String, // base64 encoded bitcoin signature of the wallet hex -} - -impl MaidClaim { - pub fn new(address: MaidAddress, wallet: String, signature: String) -> Result { - let pubkey = match pubkey_from_signature(&wallet, &signature) { - Ok(pk) => pk, - Err(err) => { - return Err(eyre!("Invalid signature: {err}")); - } - }; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - let mc = MaidClaim { - address, - pubkey: pubkey_hex, - wallet, - signature, - }; - mc.is_valid()?; - Ok(mc) - } - - pub fn from_csv_line(line: &str) -> Result { - let cells = line.trim().split(',').collect::>(); - if cells.len() != 4 { - let msg = format!("Invalid claim csv: {line}"); - return Err(eyre!(msg.to_string())); - } - let mc = MaidClaim { - address: cells[0].to_string(), - pubkey: cells[1].to_string(), - wallet: cells[2].to_string(), - signature: cells[3].to_string(), - }; - mc.is_valid()?; - Ok(mc) - } - - pub fn to_csv_line(&self) -> String { - format!( - "{},{},{},{}", - self.address, self.pubkey, self.wallet, self.signature - ) - } - - pub fn is_valid(&self) -> Result<()> { - // check signature is correct - check_signature(&self.address, &self.wallet, &self.signature)?; - // check pk matches address - if !maid_pk_matches_address(&self.address, &self.pubkey) { - return Err(eyre!("Claim public key does not match address")); - } - // check wallet is a valid bls pubkey - if MainPubkey::from_hex(&self.wallet).is_err() { - return Err(eyre!("Invalid bls public key")); - }; - // if all the checks are ok, it's valid - Ok(()) - } - - pub fn save_to_file(&self) -> Result<()> { - // check it's valid before we write it, can't know for sure it was - // already validated - self.is_valid()?; - // if it already exists, 
overwrite it - let addr_path = get_claims_data_dir_path()?.join(self.address.clone()); - let csv_line = self.to_csv_line(); - std::fs::write(addr_path, csv_line)?; - Ok(()) - } -} - -// This is different to test_faucet_data_dir because it should *not* be -// removed when --clean flag is specified. -fn get_snapshot_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_claims_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("claims"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_distributions_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("distributions"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -pub fn load_maid_snapshot() -> Result { - // If the faucet restarts there will be an existing snapshot which should - // be used to avoid conflicts in the balances between two different - // snapshots. 
- // Check if a previous snapshot already exists - let root_dir = get_snapshot_data_dir_path()?; - let filename = root_dir.join(SNAPSHOT_FILENAME); - if std::fs::metadata(filename.clone()).is_ok() { - info!("Using existing maid snapshot from {:?}", filename); - maid_snapshot_from_file(filename) - } else { - info!("Fetching snapshot from {}", SNAPSHOT_URL); - maid_snapshot_from_internet(filename) - } -} - -fn maid_snapshot_from_file(snapshot_path: PathBuf) -> Result { - let content = std::fs::read_to_string(snapshot_path)?; - parse_snapshot(content) -} - -fn maid_snapshot_from_internet(snapshot_path: PathBuf) -> Result { - // make the request - let response = minreq::get(SNAPSHOT_URL).send()?; - // check the request is ok - if response.status_code != HTTP_STATUS_OK { - let msg = format!("Snapshot failed with http status {}", response.status_code); - return Err(eyre!(msg)); - } - // write the response to file - let body = response.as_str()?; - info!("Writing snapshot to {:?}", snapshot_path); - std::fs::write(snapshot_path.clone(), body)?; - info!("Saved snapshot to {:?}", snapshot_path); - // parse the json response - parse_snapshot(body.to_string()) -} - -fn parse_snapshot(json_str: String) -> Result { - let balances: Vec = serde_json::from_str(&json_str)?; - let mut balances_map: Snapshot = Snapshot::new(); - // verify the snapshot is ok - // balances must match the ico amount, which is slightly higher than - // 2^32/10 because of the ico process. - // see https://omniexplorer.info/asset/3 - let supply = NanoTokens::from(452_552_412_000_000_000); - let mut total = NanoTokens::zero(); - for b in &balances { - // The reserved amount is the amount currently for sale on omni dex. - // If it's not included the total is lower than expected. - // So the amount of maid an address owns is balance + reserved. 
- let balance = NanoTokens::from_str(&b.balance)?; - let reserved = NanoTokens::from_str(&b.reserved)?; - let address_balance = match balance.checked_add(reserved) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {balance} + {reserved}"); - return Err(eyre!(msg)); - } - }; - total = match total.checked_add(address_balance) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {total} + {address_balance}"); - return Err(eyre!(msg)); - } - }; - balances_map.insert(b.address.clone(), address_balance); - } - if total != supply { - let msg = format!("Incorrect snapshot total, got {total} want {supply}"); - return Err(eyre!(msg)); - } - // log the total number of balances that were parsed - info!("Parsed {} maid balances from the snapshot", balances.len()); - Ok(balances_map) -} - -fn load_maid_claims_from_local() -> Result> { - let mut claims = HashMap::new(); - // load from existing files - let claims_dir = get_claims_data_dir_path()?; - let file_list = std::fs::read_dir(claims_dir)?; - for file in file_list { - // add to hashmap - let file = file?; - let claim_csv = std::fs::read_to_string(file.path())?; - let claim = MaidClaim::from_csv_line(&claim_csv)?; - claims.insert(claim.address.clone(), claim); - } - Ok(claims) -} - -pub fn load_maid_claims() -> Result> { - info!("Loading claims for distributions"); - let mut claims = match load_maid_claims_from_local() { - Ok(claims) => claims, - Err(err) => { - info!("Failed to load claims from local, {err:?}"); - HashMap::new() - } - }; - info!("{} claims after reading existing files", claims.len()); - - // load from list on internet - info!("Fetching claims from {CLAIMS_URL}"); - let response = minreq::get(CLAIMS_URL).send()?; - // check the request is ok - if response.status_code != 200 { - println!( - "Claims request failed with http status {}", - response.status_code - ); - // The existing data is ok, no need to fail to start the server here - return Ok(claims); - 
} - // parse the response as csv, each row has format: - // address,pkhex,wallet,signature - let body = response.as_str()?; - let lines: Vec<&str> = body.trim().split('\n').collect(); - info!("{} claims rows from {CLAIMS_URL}", lines.len()); - for line in lines { - let claim = match MaidClaim::from_csv_line(line) { - Ok(c) => c, - Err(_) => { - continue; - } - }; - // validate this claim info all matches correctly - if claim.is_valid().is_err() { - continue; - } - // save this cliam to the file system - if claim.save_to_file().is_err() { - println!("Error saving claim to file"); - continue; - } - // add this claim to the hashmap - claims.insert(claim.address.clone(), claim); - } - info!("{} claims after reading from online list", claims.len()); - Ok(claims) -} - -fn maid_pk_matches_address(address: &str, pk_hex: &str) -> bool { - // parse the address - let addr = match bitcoin::Address::from_str(address) { - Ok(a) => a, - Err(_) => return false, - }; - let btc_addr = match addr.clone().require_network(bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - // parse the public key - let pk = match bitcoin::PublicKey::from_str(pk_hex) { - Ok(p) => p, - Err(_) => return false, - }; - // The public key may be for a p2pkh address (starting with 1) or a p2wpkh - // address (starting with 3) so we need to check both. 
- let is_p2pkh = btc_addr.is_related_to_pubkey(&pk); - if is_p2pkh { - return true; - } - let p2wpkh_addr = match bitcoin::Address::p2shwpkh(&pk, bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - let is_p2wpkh = p2wpkh_addr == addr; - if is_p2wpkh { - return true; - } - false -} - -fn check_signature(address: &MaidAddress, msg: &str, signature: &str) -> Result<()> { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = bitcoin::sign_message::MessageSignature::from_str(signature)?; - // Signatures doesn't work with p2wpkh-p2sh so always use p2pkh addr. - // This was double checked with electrum signature validation. - let mut addr = - bitcoin::Address::from_str(address)?.require_network(bitcoin::Network::Bitcoin)?; - let pubkey = pubkey_from_signature(msg, signature)?; - if address.starts_with('3') { - addr = bitcoin::Address::p2pkh(&pubkey, bitcoin::Network::Bitcoin); - } - // check the signature is correct - if !sig.is_signed_by_address(&secp, &addr, msg_hash)? { - return Err(eyre!("Invalid signature")); - } - // Check the pubkey in the signature matches the address. - // This prevents someone submitting a valid signature from a pubkey that - // doesn't match the address for the snapshot. 
- let pubkey_hex = hex::encode(pubkey.to_bytes()); - if !maid_pk_matches_address(address, &pubkey_hex) { - return Err(eyre!("Public key does not match address")); - } - Ok(()) -} - -fn pubkey_from_signature(msg: &str, signature: &str) -> Result { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = match bitcoin::sign_message::MessageSignature::from_base64(signature) { - Ok(s) => s, - Err(err) => { - let msg = format!("Error parsing signature: {err}"); - return Err(eyre!(msg)); - } - }; - let pubkey = sig.recover_pubkey(&secp, msg_hash)?; - Ok(pubkey) -} - -pub async fn distribute_from_maid_to_tokens( - client: Client, - snapshot: Snapshot, - claims: HashMap, -) { - for (addr, amount) in snapshot { - // check if this snapshot address has a pubkey - if !claims.contains_key(&addr) { - continue; - } - let claim = &claims[&addr]; - match create_distribution(&client, claim, &amount).await { - Ok(_) => {} - Err(err) => { - info!( - "Error creating distribution: {0} {err}", - claim.to_csv_line() - ); - } - } - } -} - -pub async fn handle_distribution_req( - client: &Client, - query: HashMap, - balances: Snapshot, -) -> Result { - let address = query - .get("address") - .ok_or(eyre!("Missing address in querystring"))? - .to_string(); - let wallet = query - .get("wallet") - .ok_or(eyre!("Missing wallet in querystring"))? - .to_string(); - let signature = query - .get("signature") - .ok_or(eyre!("Missing signature in querystring"))? 
- .to_string(); - let amount = balances - .get(&address) - .ok_or(eyre!("Address not in snapshot"))?; - // Bitcoin expects base64 standard encoding but the query string has - // base64 url encoding, so the sig is converted to standard encoding - let sig_bytes = base64::engine::general_purpose::URL_SAFE.decode(signature)?; - let sig = base64::engine::general_purpose::STANDARD.encode(sig_bytes); - let claim = MaidClaim::new(address, wallet, sig)?; - create_distribution(client, &claim, amount).await -} - -async fn create_distribution( - client: &Client, - claim: &MaidClaim, - amount: &NanoTokens, -) -> Result { - // validate the claim - if claim.is_valid().is_err() { - let claim_csv = claim.to_csv_line(); - let msg = format!("Not creating distribution for invalid claim: {claim_csv}"); - info!(msg); - return Err(eyre!(msg)); - } - // save this claim to file - claim.save_to_file()?; - // check if this distribution has already been created - let root = get_distributions_data_dir_path()?; - let dist_path = root.join(&claim.address); - if dist_path.exists() { - let dist_hex = match std::fs::read_to_string(dist_path.clone()) { - Ok(content) => content, - Err(err) => { - let msg = format!( - "Error reading distribution file {}: {}", - dist_path.display(), - err - ); - info!(msg); - return Err(eyre!(msg)); - } - }; - return Ok(dist_hex); - } - info!( - "Distributing {} for {} to {}", - amount, claim.address, claim.wallet - ); - - let faucet_dir = get_faucet_data_dir(); - let faucet_wallet = load_account_wallet_or_create_with_mnemonic(&faucet_dir, None)?; - // create a transfer to the claim wallet - let transfer_hex = - match send_tokens(client, faucet_wallet, &amount.to_string(), &claim.wallet).await { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed send for {0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - let _ = match hex::decode(transfer_hex.clone()) { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed to decode transfer for 
{0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - // save the transfer - match std::fs::write(dist_path.clone(), transfer_hex.clone()) { - Ok(_) => {} - Err(err) => { - let msg = format!( - "Failed to write transfer to file {}: {}", - dist_path.display(), - err - ); - info!(msg); - info!("The transfer hex that failed to write to file:"); - info!(transfer_hex); - return Err(eyre!(msg)); - } - }; - Ok(transfer_hex) -} - -#[cfg(all(test, feature = "distribution"))] -mod tests { - use super::*; - - use assert_fs::TempDir; - use bitcoin::{ - hashes::Hash, - secp256k1::{rand, Secp256k1}, - Address, Network, PublicKey, - }; - use sn_logging::LogBuilder; - use sn_transfers::{HotWallet, MainSecretKey, Transfer}; - - // This test is to confirm fetching 'MAID snapshop` and `Maid claims` list from website - // is working properly and giving consistent and expected result. - // - // Note: the current list will grow as testnets collect more claims - #[test] - fn fetching_from_network() -> Result<()> { - let snapshot = load_maid_snapshot()?; - println!("Maid snapshot got {:?} entries", snapshot.len()); - assert!(!snapshot.is_empty()); - - let claims = load_maid_claims()?; - println!("Got {:?} distribution claims", claims.len()); - - Ok(()) - } - - // This test will simulate a token distribution. 
- #[tokio::test] - async fn token_distribute_to_user() -> Result<()> { - let _log_guards = - LogBuilder::init_single_threaded_tokio_test("token_distribute_to_user test", true); - - let amount = NanoTokens::from(10); - - let secp = Secp256k1::new(); // DevSkim: ignore DS440100 - let (maid_secret_key, maid_public_key) = secp.generate_keypair(&mut rand::thread_rng()); - let maid_address = Address::p2pkh(&PublicKey::new(maid_public_key), Network::Bitcoin); - - let client_token_issuer = Client::quick_start(None).await?; - - // wallet comes from `safe wallet address` - let wallet_sk = bls::SecretKey::random(); - let wallet_pk_hex = wallet_sk.public_key().to_hex(); - // signature comes from bitcoin signing like electrum or trezor - let msg_hash = bitcoin::sign_message::signed_msg_hash(&wallet_pk_hex); - let msg = bitcoin::secp256k1::Message::from_digest(msg_hash.to_byte_array()); // DevSkim: ignore DS440100 - let secp_sig = secp.sign_ecdsa_recoverable(&msg, &maid_secret_key); - let signature = bitcoin::sign_message::MessageSignature { - signature: secp_sig, - compressed: true, - }; - let claim = MaidClaim::new( - maid_address.to_string(), - wallet_pk_hex, - signature.to_string(), - )?; - - let transfer_hex = create_distribution(&client_token_issuer, &claim, &amount).await?; - - let transfer = Transfer::from_hex(&transfer_hex)?; - - assert!(transfer - .cashnote_redemptions(&MainSecretKey::new(wallet_sk.clone())) - .is_ok()); - - let receiver_client = Client::new(bls::SecretKey::random(), None, None, None).await?; - let tmp_path = TempDir::new()?.path().to_owned(); - let receiver_wallet = - HotWallet::load_from_path(&tmp_path, Some(MainSecretKey::new(wallet_sk)))?; - - let mut cash_notes = receiver_client.receive(&transfer, &receiver_wallet).await?; - assert_eq!(cash_notes.len(), 1); - let cash_note = cash_notes.pop().unwrap(); - - assert_eq!(cash_note.value(), amount); - - Ok(()) - } - - #[test] - fn maidclaim_isvalid() -> Result<()> { - // Signatures generated using 
electrum to ensure interoperability. - - // prvkey for addr 17ig7... is L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - // sig is valid for wallet_a signed by addr_a - const MAID_ADDR_A: &str = "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG"; - const MAID_PUBKEY_A: &str = - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc"; // DevSkim: ignore DS173237 - const WALLET_A: &str = "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a"; // DevSkim: ignore DS173237 - const SIG_A: &str = "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM="; - - // prvkey for addr 1EbjF... is L2gzGZUqifkBG3jwwkyyfos8A67VvFhyrtqKU5cWkfEpySkFbaBR - // sig is valid for wallet_b signed by addr_b - const MAID_PUBKEY_B: &str = - "031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f"; // DevSkim: ignore DS173237 - const WALLET_B: &str = "915d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_B: &str = "Hy3zUK3YiEidzE+HpdgeoRoH3lkCrOoTh59TvoOiUdfJVKKLAVUuAydgIJkOTVU8JKdvbYPGiQhf7KCiNtLRIVU="; - - // not a valid bls wallet (starting with 0) - // sig is valid for wallet_c signed by addr_a - const WALLET_C: &str = "015d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_C: &str = "IE8y8KSRKw3hz/rd9dzrJLOu24sAspuJgYr6VVGCga3FQQhzOEFDKZoDdrJORRI4Rvv7vFqRARQVaBKCobYh9sc="; - - // MaidClaim::new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_A.to_string(), - ); - assert!(mc.is_ok()); - - // MaidClaim::new will fail if inputs are incorrect - // because new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_B.to_string(), - ); - assert!(mc.is_err()); - - // valid - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: 
MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_ok()); - - // pk not matching address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature not matching message - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature matches message but not address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_B.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // wallet is not a valid bls key - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_C.to_string(), - signature: SIG_C.to_string(), - }; - assert!(mc.is_valid().is_err()); - - Ok(()) - } - - #[test] - fn pk_matches_addr() -> Result<()> { - // p2pkh compressed - assert!(maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc", // DevSkim: ignore DS173237 - )); - - // p2pkh uncompressed - assert!(maid_pk_matches_address( - "1QK8WWMcDEFUVV2zKU8GSCwwuvAFWEs2QW", - "0483f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc4327efb5ba23543c8a6e63ddc09618e11b5d0d184bb69f964712d0894c005655", // DevSkim: ignore DS173237 - )); - - // p2wpkh-p2sh - assert!(maid_pk_matches_address( - "3GErA71Kz6Tn4QCLqoaDvMxD5cLgqQLykv", - "03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a", // DevSkim: ignore DS173237 - )); - - // mismatched returns false - assert!(!maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - 
"031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f", // DevSkim: ignore DS173237 - )); - - Ok(()) - } - - #[test] - fn pubkey_from_sig() -> Result<()> { - // Valid message and signature produces the corresponding public key. - // Signatures generated using electrum to ensure interoperability - - // p2pkh compressed - // electrum import key - // L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc" // DevSkim: ignore DS173237 - ); - - // p2pkh uncompressed - // electrum import key - // 5Jz2acAoqLr57YXzQuoiNS8sQtZQ3TBcVcaKsX5ybp9HtJiUSXq - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Gw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "04952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a0418114ad86aeda109dd924629bbf929e82c6ce5be948e4d21a95575a53e1f73" // DevSkim: ignore DS173237 - ); - - // p2wpkh-p2sh uncompressed - // electrum import key - // p2wpkh-p2sh:L2NhyLEHiNbb9tBnQY5BbbwjWSZzhpZqfJ26Hynxpf5bXL9sUm73 - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Hw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - 
"03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a" // DevSkim: ignore DS173237 - ); - - Ok(()) - } -} diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index bd73bb2773..090e3f8a12 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.36" +version = "0.2.37" [dependencies] chrono = "~0.4.19" diff --git a/sn_logging/src/layers.rs b/sn_logging/src/layers.rs index 4fbd3c07ea..91f771e6b9 100644 --- a/sn_logging/src/layers.rs +++ b/sn_logging/src/layers.rs @@ -266,6 +266,8 @@ fn get_logging_targets(logging_env_value: &str) -> Result> if contains_keyword_all_sn_logs || contains_keyword_verbose_sn_logs { let mut t = BTreeMap::from_iter(vec![ // bins + ("autonomi_cli".to_string(), Level::TRACE), + ("evm_testnet".to_string(), Level::TRACE), ("faucet".to_string(), Level::TRACE), ("safenode".to_string(), Level::TRACE), ("safenode_rpc_client".to_string(), Level::TRACE), @@ -273,8 +275,10 @@ fn get_logging_targets(logging_env_value: &str) -> Result> ("safenode_manager".to_string(), Level::TRACE), ("safenodemand".to_string(), Level::TRACE), // libs - ("sn_build_info".to_string(), Level::TRACE), ("autonomi".to_string(), Level::TRACE), + ("evmlib".to_string(), Level::TRACE), + ("sn_evm".to_string(), Level::TRACE), + ("sn_build_info".to_string(), Level::TRACE), ("sn_client".to_string(), Level::TRACE), ("sn_faucet".to_string(), Level::TRACE), ("sn_logging".to_string(), Level::TRACE), diff --git a/sn_logging/src/lib.rs b/sn_logging/src/lib.rs index e255d8c843..bb2b786729 100644 --- a/sn_logging/src/lib.rs +++ b/sn_logging/src/lib.rs @@ -17,12 +17,12 @@ use layers::TracingLayers; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use tracing::info; -use tracing_appender::non_blocking::WorkerGuard; use tracing_core::dispatcher::DefaultGuard; use 
tracing_subscriber::{prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt}; pub use error::Error; pub use layers::ReloadHandle; +pub use tracing_appender::non_blocking::WorkerGuard; // re-exporting the tracing crate's Level as it is used in our public API pub use tracing_core::Level; @@ -268,7 +268,7 @@ impl LogBuilder { let timestamp = chrono::Local::now().format("%Y-%m-%d_%H-%M-%S").to_string(); let path = dir .join("safe") - .join("client") + .join("autonomi") .join("logs") .join(format!("log_{timestamp}")); LogOutputDest::Path(path) diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index cd2ad4b26d..4a550a58a8 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.16" +version = "0.1.17" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index bf58cc5ea1..4f2270ff37 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,21 +8,21 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.4" +version = "0.19.0" [features] default = [] -local-discovery = ["libp2p/mdns"] +local = ["libp2p/mdns"] upnp = ["libp2p/upnp"] # tcp is automatically enabled when compiling for wasm32 websockets = ["libp2p/tcp"] open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] encrypt-records = [] - +loud = [] [dependencies] lazy_static = "~1.4.0" -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "dns", "kad", @@ -52,11 +52,13 @@ prometheus-client = { version = "0.22", optional = true } rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" +self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { 
path="../sn_build_info", version = "0.1.15" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } @@ -93,7 +95,7 @@ crate-type = ["cdylib", "rlib"] [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.2.12", features = ["js"] } -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "dns", "kad", diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index 608f8116af..f8b7cf1e59 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -134,8 +134,12 @@ impl ContinuousBootstrap { "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" ); + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. 
+ #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] let mut new_interval = interval(no_peer_added_slowdown_interval_duration); - new_interval.tick().await; // the first tick completes immediately + #[cfg(not(target_arch = "wasm32"))] + new_interval.tick().await; + return (should_bootstrap, Some(new_interval)); } @@ -145,8 +149,13 @@ impl ContinuousBootstrap { let new_interval = BOOTSTRAP_INTERVAL * step; let new_interval = if new_interval > current_interval { info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); + + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. + #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] let mut interval = interval(new_interval); - interval.tick().await; // the first tick completes immediately + #[cfg(not(target_arch = "wasm32"))] + interval.tick().await; + Some(interval) } else { None diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index d70fc3eba5..b0eda19190 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -21,12 +21,12 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics}; use sn_protocol::{ messages::{Cmd, Request, Response}, storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -44,8 +44,6 @@ const REPLICATION_TIMEOUT: Duration = Duration::from_secs(45); #[derive(Debug, Eq, PartialEq)] pub enum NodeIssue { - /// Connection issues observed - ConnectionIssue, /// Data Replication failed ReplicationFailure, /// Close nodes have reported this peer as bad @@ -88,10 +86,10 @@ pub enum LocalSwarmCmd { key: RecordKey, sender: oneshot::Sender>, }, - /// GetLocalStoreCost for this node + /// GetLocalStoreCost for this node, also with the 
bad_node list close to the target GetLocalStoreCost { key: RecordKey, - sender: oneshot::Sender<(NanoTokens, QuotingMetrics)>, + sender: oneshot::Sender<(AttoTokens, QuotingMetrics, Vec)>, }, /// Notify the node received a payment. PaymentReceived, @@ -561,11 +559,45 @@ impl SwarmDriver { .store_cost(&key); self.record_metrics(Marker::StoreCost { - cost: cost.as_nano(), + cost: cost.as_atto(), quoting_metrics: "ing_metrics, }); - let _res = sender.send((cost, quoting_metrics)); + // To avoid sending entire list to client, sending those that: + // closer than the CLOSE_GROUP_SIZEth closest node to the target + let mut bad_nodes: Vec<_> = self + .bad_nodes + .iter() + .filter_map(|(peer_id, (_issue_list, is_bad))| { + if *is_bad { + Some(NetworkAddress::from_peer(*peer_id)) + } else { + None + } + }) + .collect(); + + // List is ordered already, hence the last one is always the one wanted + let kbucket_key = NetworkAddress::from_record_key(&key).as_kbucket_key(); + let closest_peers: Vec<_> = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&kbucket_key) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect(); + // In case of not enough clsest_peers, send the entire list + if closest_peers.len() >= CLOSE_GROUP_SIZE { + let boundary_peer = closest_peers[CLOSE_GROUP_SIZE - 1]; + let key_address = NetworkAddress::from_record_key(&key); + let boundary_distance = + key_address.distance(&NetworkAddress::from_peer(boundary_peer)); + bad_nodes + .retain(|peer_addr| key_address.distance(peer_addr) < boundary_distance); + } + + let _res = sender.send((cost, quoting_metrics, bad_nodes)); } LocalSwarmCmd::PaymentReceived => { cmd_string = "PaymentReceived"; @@ -596,11 +628,14 @@ impl SwarmDriver { Ok(record_header) => { match record_header.kind { RecordKind::Chunk => RecordType::Chunk, + RecordKind::Scratchpad => RecordType::Scratchpad, RecordKind::Spend | RecordKind::Register => { let content_hash = XorName::from_content(&record.value); 
RecordType::NonChunk(content_hash) } - RecordKind::ChunkWithPayment | RecordKind::RegisterWithPayment => { + RecordKind::ChunkWithPayment + | RecordKind::RegisterWithPayment + | RecordKind::ScratchpadWithPayment => { error!("Record {record_key:?} with payment shall not be stored locally."); return Err(NetworkError::InCorrectRecordHeader); } @@ -716,6 +751,7 @@ impl SwarmDriver { } LocalSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; + #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self .swarm .behaviour_mut() @@ -732,7 +768,7 @@ impl SwarmDriver { if let Some(distance) = range.0.ilog2() { let peers_in_kbucket = kbucket .iter() - .map(|peer_entry| peer_entry.node.key.clone().into_preimage()) + .map(|peer_entry| peer_entry.node.key.into_preimage()) .collect::>(); let _ = ilog2_kbuckets.insert(distance, peers_in_kbucket); } else { diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 7e0d9883d6..ec716cb4df 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -6,10 +6,6 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-#[cfg(feature = "open-metrics")] -use crate::metrics::NetworkMetricsRecorder; -#[cfg(feature = "open-metrics")] -use crate::metrics_service::run_metrics_server; use crate::{ bootstrap::{ContinuousBootstrap, BOOTSTRAP_INTERVAL}, circular_vec::CircularVec, @@ -27,10 +23,14 @@ use crate::{ target_arch::{interval, spawn, Instant}, GetRecordError, Network, CLOSE_GROUP_SIZE, }; +#[cfg(feature = "open-metrics")] +use crate::{ + metrics::service::run_metrics_server, metrics::NetworkMetricsRecorder, MetricsRegistries, +}; use crate::{transport, NodeIssue}; use futures::future::Either; use futures::StreamExt; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; @@ -46,17 +46,18 @@ use libp2p::{ Multiaddr, PeerId, }; #[cfg(feature = "open-metrics")] -use prometheus_client::{metrics::info::Info, registry::Registry}; +use prometheus_client::metrics::info::Info; +use sn_evm::PaymentQuote; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, - storage::RetryStrategy, + storage::{try_deserialize_record, RetryStrategy}, version::{ IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR, }, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; -use sn_transfers::PaymentQuote; +use sn_registers::SignedRegister; use std::{ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, fmt::Debug, @@ -76,6 +77,8 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 /// Interval over which we query relay manager to check if we can make any more reservations. pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); +const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); + /// The ways in which the Get Closest queries are used. 
pub(crate) enum PendingGetClosestType { /// The network discovery method is present at the networking layer @@ -138,11 +141,41 @@ pub struct GetRecordCfg { pub target_record: Option, /// Logs if the record was not fetched from the provided set of peers. pub expected_holders: HashSet, + /// For register record, only root value shall be checked, not the entire content. + pub is_register: bool, } impl GetRecordCfg { pub fn does_target_match(&self, record: &Record) -> bool { - self.target_record.as_ref().is_some_and(|t| t == record) + if let Some(ref target_record) = self.target_record { + if self.is_register { + let pretty_key = PrettyPrintRecordKey::from(&target_record.key); + + let fetched_register = match try_deserialize_record::(record) { + Ok(fetched_register) => fetched_register, + Err(err) => { + error!("When try to deserialize register from fetched record {pretty_key:?}, have error {err:?}"); + return false; + } + }; + let target_register = match try_deserialize_record::(target_record) + { + Ok(target_register) => target_register, + Err(err) => { + error!("When try to deserialize register from target record {pretty_key:?}, have error {err:?}"); + return false; + } + }; + + target_register.base_register() == fetched_register.base_register() + && target_register.ops() == fetched_register.ops() + } else { + target_record == record + } + } else { + // Not have target_record to check with + true + } } } @@ -203,7 +236,7 @@ pub(super) struct NodeBehaviour { pub(super) blocklist: libp2p::allow_block_list::Behaviour, pub(super) identify: libp2p::identify::Behaviour, - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] pub(super) mdns: mdns::tokio::Behaviour, #[cfg(feature = "upnp")] pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle, @@ -218,15 +251,12 @@ pub struct NetworkBuilder { is_behind_home_network: bool, keypair: Keypair, local: bool, - root_dir: PathBuf, listen_addr: Option, request_timeout: Option, concurrency_limit: Option, 
initial_peers: Vec, #[cfg(feature = "open-metrics")] - metrics_metadata_registry: Option, - #[cfg(feature = "open-metrics")] - metrics_registry: Option, + metrics_registries: Option, #[cfg(feature = "open-metrics")] metrics_server_port: Option, #[cfg(feature = "upnp")] @@ -234,20 +264,17 @@ pub struct NetworkBuilder { } impl NetworkBuilder { - pub fn new(keypair: Keypair, local: bool, root_dir: PathBuf) -> Self { + pub fn new(keypair: Keypair, local: bool) -> Self { Self { is_behind_home_network: false, keypair, local, - root_dir, listen_addr: None, request_timeout: None, concurrency_limit: None, initial_peers: Default::default(), #[cfg(feature = "open-metrics")] - metrics_metadata_registry: None, - #[cfg(feature = "open-metrics")] - metrics_registry: None, + metrics_registries: None, #[cfg(feature = "open-metrics")] metrics_server_port: None, #[cfg(feature = "upnp")] @@ -275,18 +302,11 @@ impl NetworkBuilder { self.initial_peers = initial_peers; } - /// Set the Registry that will be served at the `/metadata` endpoint. This Registry should contain only the static - /// info about the peer. Configure the `metrics_server_port` to enable the metrics server. - #[cfg(feature = "open-metrics")] - pub fn metrics_metadata_registry(&mut self, metrics_metadata_registry: Registry) { - self.metrics_metadata_registry = Some(metrics_metadata_registry); - } - - /// Set the Registry that will be served at the `/metrics` endpoint. + /// Set the registries used inside the metrics server. /// Configure the `metrics_server_port` to enable the metrics server. 
#[cfg(feature = "open-metrics")] - pub fn metrics_registry(&mut self, metrics_registry: Registry) { - self.metrics_registry = Some(metrics_registry); + pub fn metrics_registries(&mut self, registries: MetricsRegistries) { + self.metrics_registries = Some(registries); } #[cfg(feature = "open-metrics")] @@ -313,8 +333,11 @@ impl NetworkBuilder { /// # Errors /// /// Returns an error if there is a problem initializing the mDNS behaviour. - pub fn build_node(self) -> Result<(Network, mpsc::Receiver, SwarmDriver)> { - let mut kad_cfg = kad::Config::default(); + pub fn build_node( + self, + root_dir: PathBuf, + ) -> Result<(Network, mpsc::Receiver, SwarmDriver)> { + let mut kad_cfg = kad::Config::new(KAD_STREAM_PROTOCOL_ID); let _ = kad_cfg .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual) // how often a node will replicate records that it has stored, aka copying the key-value pair to other nodes @@ -341,7 +364,7 @@ impl NetworkBuilder { let store_cfg = { // Configures the disk_store to store records under the provided path and increase the max record size - let storage_dir_path = self.root_dir.join("record_store"); + let storage_dir_path = root_dir.join("record_store"); if let Err(error) = std::fs::create_dir_all(&storage_dir_path) { return Err(NetworkError::FailedToCreateRecordStoreDir { path: storage_dir_path, @@ -351,7 +374,7 @@ impl NetworkBuilder { NodeRecordStoreConfig { max_value_bytes: MAX_PACKET_SIZE, // TODO, does this need to be _less_ than MAX_PACKET_SIZE storage_dir: storage_dir_path, - historic_quote_dir: self.root_dir.clone(), + historic_quote_dir: root_dir.clone(), ..Default::default() } }; @@ -399,7 +422,7 @@ impl NetworkBuilder { pub fn build_client(self) -> Result<(Network, mpsc::Receiver, SwarmDriver)> { // Create a Kademlia behaviour for client mode, i.e. 
set req/resp protocol // to outbound-only mode and don't listen on any address - let mut kad_cfg = kad::Config::default(); // default query timeout is 60 secs + let mut kad_cfg = kad::Config::new(KAD_STREAM_PROTOCOL_ID); // default query timeout is 60 secs // 1mb packet size let _ = kad_cfg @@ -446,11 +469,11 @@ impl NetworkBuilder { ); #[cfg(feature = "open-metrics")] - let mut metrics_registry = self.metrics_registry.unwrap_or_default(); + let mut metrics_registries = self.metrics_registries.unwrap_or_default(); // ==== Transport ==== #[cfg(feature = "open-metrics")] - let main_transport = transport::build_transport(&self.keypair, &mut metrics_registry); + let main_transport = transport::build_transport(&self.keypair, &mut metrics_registries); #[cfg(not(feature = "open-metrics"))] let main_transport = transport::build_transport(&self.keypair); let transport = if !self.local { @@ -480,18 +503,18 @@ impl NetworkBuilder { .boxed(); #[cfg(feature = "open-metrics")] - let network_metrics = if let Some(port) = self.metrics_server_port { - let network_metrics = NetworkMetricsRecorder::new(&mut metrics_registry); - let mut metadata_registry = self.metrics_metadata_registry.unwrap_or_default(); - let network_metadata_sub_registry = - metadata_registry.sub_registry_with_prefix("sn_networking"); + let metrics_recorder = if let Some(port) = self.metrics_server_port { + let metrics_recorder = NetworkMetricsRecorder::new(&mut metrics_registries); + let metadata_sub_reg = metrics_registries + .metadata + .sub_registry_with_prefix("sn_networking"); - network_metadata_sub_registry.register( + metadata_sub_reg.register( "peer_id", "Identifier of a peer of the network", Info::new(vec![("peer_id".to_string(), peer_id.to_string())]), ); - network_metadata_sub_registry.register( + metadata_sub_reg.register( "identify_protocol_str", "The protocol version string that is used to connect to the correct network", Info::new(vec![( @@ -500,8 +523,8 @@ impl NetworkBuilder { )]), ); - 
run_metrics_server(metrics_registry, metadata_registry, port); - Some(network_metrics) + run_metrics_server(metrics_registries, port); + Some(metrics_recorder) } else { None }; @@ -543,9 +566,9 @@ impl NetworkBuilder { #[cfg(feature = "open-metrics")] let mut node_record_store = node_record_store; #[cfg(feature = "open-metrics")] - if let Some(metrics) = &network_metrics { + if let Some(metrics_recorder) = &metrics_recorder { node_record_store = node_record_store - .set_record_count_metric(metrics.records_stored.clone()); + .set_record_count_metric(metrics_recorder.records_stored.clone()); } let store = UnifiedRecordStore::Node(node_record_store); @@ -561,7 +584,7 @@ impl NetworkBuilder { } }; - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] let mdns_config = mdns::Config { // lower query interval to speed up peer discovery // this increases traffic, but means we no longer have clients unable to connect @@ -570,7 +593,7 @@ impl NetworkBuilder { ..Default::default() }; - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] let mdns = mdns::tokio::Behaviour::new(mdns_config, peer_id)?; // Identify Behaviour @@ -616,7 +639,7 @@ impl NetworkBuilder { request_response, kademlia, identify, - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] mdns, }; @@ -643,13 +666,15 @@ impl NetworkBuilder { local: self.local, is_client, is_behind_home_network: self.is_behind_home_network, + #[cfg(feature = "open-metrics")] + close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, bootstrap, relay_manager, external_address_manager, replication_fetcher, #[cfg(feature = "open-metrics")] - network_metrics, + metrics_recorder, // kept here to ensure we can push messages to the channel // and not block the processing thread unintentionally network_cmd_sender: network_swarm_cmd_sender.clone(), @@ -678,7 +703,6 @@ impl NetworkBuilder { network_swarm_cmd_sender, local_swarm_cmd_sender, peer_id, - self.root_dir, self.keypair, ); @@ -693,6 
+717,8 @@ pub struct SwarmDriver { pub(crate) local: bool, pub(crate) is_client: bool, pub(crate) is_behind_home_network: bool, + #[cfg(feature = "open-metrics")] + pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, pub(crate) bootstrap: ContinuousBootstrap, pub(crate) external_address_manager: ExternalAddressManager, @@ -700,7 +726,7 @@ pub struct SwarmDriver { /// The peers that are closer to our PeerId. Includes self. pub(crate) replication_fetcher: ReplicationFetcher, #[cfg(feature = "open-metrics")] - pub(crate) network_metrics: Option, + pub(crate) metrics_recorder: Option, network_cmd_sender: mpsc::Sender, pub(crate) local_cmd_sender: mpsc::Sender, @@ -965,8 +991,15 @@ impl SwarmDriver { pub(crate) fn record_metrics(&self, marker: Marker) { marker.log(); #[cfg(feature = "open-metrics")] - if let Some(network_metrics) = self.network_metrics.as_ref() { - network_metrics.record_from_marker(marker) + if let Some(metrics_recorder) = self.metrics_recorder.as_ref() { + metrics_recorder.record_from_marker(marker) + } + } + #[cfg(feature = "open-metrics")] + /// Updates metrics that rely on our current close group. 
+ pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec) { + if let Some(metrics_recorder) = self.metrics_recorder.as_ref() { + metrics_recorder.record_change_in_close_group(new_close_group); } } diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 2168bb892c..6534c84017 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -105,6 +105,8 @@ pub enum NetworkError { Wallet(#[from] sn_transfers::WalletError), #[error("Transfer Error {0}")] Transfer(#[from] sn_transfers::TransferError), + #[error("Evm payment Error {0}")] + EvmPaymemt(#[from] sn_evm::EvmError), #[error("Failed to sign the message with the PeerId keypair")] SigningFailed(#[from] libp2p::identity::SigningError), @@ -181,6 +183,9 @@ pub enum NetworkError { #[error("Error setting up behaviour: {0}")] BahviourErr(String), + + #[error("Register already exists at this address")] + RegisterAlreadyExists, } #[cfg(test)] diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index b9d0aef3d9..6551f6e5f0 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -8,7 +8,8 @@ use crate::{ driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record, - GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE, + target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, + CLOSE_GROUP_SIZE, }; use itertools::Itertools; use libp2p::kad::{ @@ -20,10 +21,7 @@ use sn_protocol::{ PrettyPrintRecordKey, }; use sn_transfers::SignedSpend; -use std::{ - collections::{hash_map::Entry, BTreeSet, HashSet}, - time::Instant, -}; +use std::collections::{hash_map::Entry, BTreeSet, HashSet}; use tokio::sync::oneshot; use xor_name::XorName; @@ -53,7 +51,7 @@ impl SwarmDriver { // following criteria: // 1, `stats.num_pending()` is 0 // 2, `stats.duration()` is longer than a defined period - current_closest.extend(closest_peers.peers.clone()); + 
current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id)); if current_closest.len() >= usize::from(K_VALUE) || step.last { let (get_closest_type, current_closest) = entry.remove(); match get_closest_type { @@ -101,7 +99,7 @@ impl SwarmDriver { // Trust them and leave for the caller to check whether they are enough. match err { GetClosestPeersError::Timeout { ref peers, .. } => { - current_closest.extend(peers); + current_closest.extend(peers.iter().map(|i| i.peer_id)); } } @@ -612,7 +610,7 @@ impl SwarmDriver { record: Record, cfg: &GetRecordCfg, ) -> Result<()> { - let res = if cfg.target_record.is_none() || cfg.does_target_match(&record) { + let res = if cfg.does_target_match(&record) { Ok(record) } else { Err(GetRecordError::RecordDoesNotMatch(record)) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 20f45ca2c8..7af3b268c5 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -13,7 +13,7 @@ mod swarm; use crate::{driver::SwarmDriver, error::Result}; use core::fmt; use custom_debug::Debug as CustomDebug; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; use libp2p::{ kad::{Record, RecordKey, K_VALUE}, @@ -21,11 +21,15 @@ use libp2p::{ Multiaddr, PeerId, }; +use sn_evm::PaymentQuote; +#[cfg(feature = "open-metrics")] +use sn_protocol::CLOSE_GROUP_SIZE; use sn_protocol::{ messages::{Query, Request, Response}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::PaymentQuote; +#[cfg(feature = "open-metrics")] +use std::collections::HashSet; use std::{ collections::BTreeSet, fmt::{Debug, Formatter}, @@ -39,7 +43,7 @@ pub(super) enum NodeEvent { Upnp(libp2p::upnp::Event), MsgReceived(libp2p::request_response::Event), Kademlia(libp2p::kad::Event), - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] Mdns(Box), Identify(Box), RelayClient(Box), @@ -66,7 +70,7 @@ impl From for NodeEvent { } } -#[cfg(feature = "local-discovery")] +#[cfg(feature = 
"local")] impl From for NodeEvent { fn from(event: mdns::Event) -> Self { NodeEvent::Mdns(Box::new(event)) @@ -216,25 +220,61 @@ impl Debug for NetworkEvent { } impl SwarmDriver { + /// Check for changes in our close group + #[cfg(feature = "open-metrics")] + pub(crate) fn check_for_change_in_our_close_group(&mut self) { + // this includes self + let closest_k_peers = self.get_closest_k_value_local_peers(); + + let new_closest_peers: Vec<_> = + closest_k_peers.into_iter().take(CLOSE_GROUP_SIZE).collect(); + + let old = self.close_group.iter().cloned().collect::>(); + let new_members: Vec<_> = new_closest_peers + .iter() + .filter(|p| !old.contains(p)) + .collect(); + if !new_members.is_empty() { + debug!("The close group has been updated. The new members are {new_members:?}"); + debug!("New close group: {new_closest_peers:?}"); + self.close_group = new_closest_peers.clone(); + self.record_change_in_close_group(new_closest_peers); + } + } + /// Update state on addition of a peer to the routing table. 
pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId) { self.peers_in_rt = self.peers_in_rt.saturating_add(1); - info!( - "New peer added to routing table: {added_peer:?}, now we have #{} connected peers", - self.peers_in_rt - ); + let n_peers = self.peers_in_rt; + info!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + + #[cfg(feature = "loud")] + println!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.peers_in_routing_table.set(self.peers_in_rt as i64); + if self.metrics_recorder.is_some() { + self.check_for_change_in_our_close_group(); + } + + #[cfg(feature = "open-metrics")] + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .peers_in_routing_table + .set(self.peers_in_rt as i64); } } /// Update state on removal of a peer from the routing table. 
pub(crate) fn update_on_peer_removal(&mut self, removed_peer: PeerId) { self.peers_in_rt = self.peers_in_rt.saturating_sub(1); + + // ensure we disconnect bad peer + // err result just means no connections were open + let _result = self.swarm.disconnect_peer_id(removed_peer); + info!( "Peer removed from routing table: {removed_peer:?}, now we have #{} connected peers", self.peers_in_rt @@ -243,8 +283,15 @@ impl SwarmDriver { self.send_event(NetworkEvent::PeerRemoved(removed_peer, self.peers_in_rt)); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.peers_in_routing_table.set(self.peers_in_rt as i64); + if self.metrics_recorder.is_some() { + self.check_for_change_in_our_close_group(); + } + + #[cfg(feature = "open-metrics")] + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .peers_in_routing_table + .set(self.peers_in_rt as i64); } } @@ -284,8 +331,8 @@ impl SwarmDriver { let estimated_network_size = Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - let _ = metrics + if let Some(metrics_recorder) = &self.metrics_recorder { + let _ = metrics_recorder .estimated_network_size .set(estimated_network_size as i64); } @@ -301,6 +348,8 @@ impl SwarmDriver { } info!("kBucketTable has {index:?} kbuckets {total_peers:?} peers, {kbucket_table_stats:?}, estimated network size: {estimated_network_size:?}"); + #[cfg(feature = "loud")] + println!("Estimated network size: {estimated_network_size:?}"); } /// Estimate the number of nodes in the network diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index de16815b44..4550772bf4 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -238,22 +238,37 @@ impl SwarmDriver { let mut rng = thread_rng(); // 5% probability if more_than_one_key && 
rng.gen_bool(0.05) { - let keys_to_verify = self.select_verification_data_candidates(sender); + self.verify_peer_storage(sender.clone()); - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {holder:?}"); - } else { - self.send_event(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }); + // In additon to verify the sender, we also verify a random close node. + // This is to avoid malicious node escaping the check by never send a replication_list. + // With further reduced probability of 1% (5% * 20%) + if rng.gen_bool(0.2) { + let close_group_peers = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&self.self_peer_id.into()) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect_vec(); + if close_group_peers.len() == CLOSE_GROUP_SIZE { + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate = NetworkAddress::from_peer(close_group_peers[index]); + if sender != candidate { + self.verify_peer_storage(candidate); + break; + } + } + } } } } /// Check among all chunk type records that we have, select those close to the peer, /// and randomly pick one as the verification candidate. - fn select_verification_data_candidates(&mut self, peer: NetworkAddress) -> Vec { + fn verify_peer_storage(&mut self, peer: NetworkAddress) { let mut closest_peers = self .swarm .behaviour_mut() @@ -268,7 +283,7 @@ impl SwarmDriver { peer_id } else { error!("Target {peer:?} is not a valid PeerId"); - return vec![]; + return; }; let all_keys = self @@ -309,9 +324,12 @@ impl SwarmDriver { // AND choose candidate from certain reduced range. 
if verify_candidates.len() > 50 { let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2)); - vec![verify_candidates[index].clone()] + self.send_event(NetworkEvent::ChunkProofVerification { + peer_id: target_peer, + keys_to_verify: vec![verify_candidates[index].clone()], + }); } else { - vec![] + debug!("No valid candidate to be checked against peer {peer:?}"); } } } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index e0ae2ea687..982088f102 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,10 +7,10 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::LocalSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, - relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, + event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, + target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; #[cfg(feature = "open-metrics")] use libp2p::metrics::Recorder; @@ -33,8 +33,8 @@ impl SwarmDriver { // This does not record all the events. `SwarmEvent::Behaviour(_)` are skipped. Hence `.record()` has to be // called individually on each behaviour. 
#[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&event); } let start = Instant::now(); let event_string; @@ -47,8 +47,8 @@ impl SwarmDriver { } SwarmEvent::Behaviour(NodeEvent::Kademlia(kad_event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&kad_event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&kad_event); } event_string = "kad_event"; self.handle_kad_event(kad_event)?; @@ -69,8 +69,8 @@ impl SwarmDriver { #[cfg(feature = "upnp")] SwarmEvent::Behaviour(NodeEvent::Upnp(upnp_event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&upnp_event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&upnp_event); } event_string = "upnp_event"; info!(?upnp_event, "UPnP event"); @@ -84,8 +84,8 @@ impl SwarmDriver { SwarmEvent::Behaviour(NodeEvent::RelayServer(event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*event)); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&(*event)); } event_string = "relay_server_event"; @@ -109,14 +109,18 @@ impl SwarmDriver { SwarmEvent::Behaviour(NodeEvent::Identify(iden)) => { // Record the Identify event for metrics if the feature is enabled. 
#[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*iden)); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&(*iden)); } event_string = "identify"; match *iden { - libp2p::identify::Event::Received { peer_id, info } => { - debug!(%peer_id, ?info, "identify: received info"); + libp2p::identify::Event::Received { + peer_id, + info, + connection_id, + } => { + debug!(conn_id=%connection_id, %peer_id, ?info, "identify: received info"); if info.protocol_version != IDENTIFY_PROTOCOL_STR.to_string() { warn!(?info.protocol_version, "identify: {peer_id:?} does not have the same protocol. Our IDENTIFY_PROTOCOL_STR: {:?}", IDENTIFY_PROTOCOL_STR.as_str()); @@ -273,7 +277,7 @@ impl SwarmDriver { libp2p::identify::Event::Error { .. } => debug!("identify: {iden:?}"), } } - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] SwarmEvent::Behaviour(NodeEvent::Mdns(mdns_event)) => { event_string = "mdns"; match *mdns_event { @@ -325,6 +329,7 @@ impl SwarmDriver { self.send_event(NetworkEvent::NewListenAddr(address.clone())); info!("Local node is listening {listener_id:?} on {address:?}"); + println!("Local node is listening on {address:?}"); // TODO: make it print only once } SwarmEvent::ListenerClosed { listener_id, @@ -403,6 +408,11 @@ impl SwarmDriver { match err { TransportError::MultiaddrNotSupported(addr) => { warn!("Multiaddr not supported : {addr:?}"); + #[cfg(feature = "loud")] + { + println!("Multiaddr not supported : {addr:?}"); + println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); + } // if we can't dial a peer on a given address, we should remove it from the routing table there_is_a_serious_issue = true } @@ -489,11 +499,6 @@ impl SwarmDriver { .remove_peer(&failed_peer_id) { self.update_on_peer_removal(*dead_peer.node.key.preimage()); - - self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { - peer_id: failed_peer_id, - issue: 
crate::NodeIssue::ConnectionIssue, - })?; } } } @@ -639,7 +644,7 @@ impl SwarmDriver { /// Record the metrics on update of connection state. fn record_connection_metrics(&self) { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { + if let Some(metrics) = &self.metrics_recorder { metrics .open_connections .set(self.live_connected_peers.len() as i64); diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index f08f361bf7..27f07bdb3e 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -19,8 +19,6 @@ mod external_address; mod log_markers; #[cfg(feature = "open-metrics")] mod metrics; -#[cfg(feature = "open-metrics")] -mod metrics_service; mod network_discovery; mod record_store; mod record_store_api; @@ -35,8 +33,6 @@ use cmd::LocalSwarmCmd; use xor_name::XorName; // re-export arch dependent deps for use in the crate, or above -pub use target_arch::{interval, sleep, spawn, Instant, Interval}; - pub use self::{ cmd::{NodeIssue, SwarmLocalState}, driver::{ @@ -47,9 +43,11 @@ pub use self::{ record_store::{calculate_cost_for_records, NodeRecordStore}, transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; +#[cfg(feature = "open-metrics")] +pub use metrics::service::MetricsRegistries; +pub use target_arch::{interval, sleep, spawn, Instant, Interval}; use self::{cmd::NetworkSwarmCmd, error::Result}; -use backoff::{Error as BackoffError, ExponentialBackoff}; use futures::future::select_all; use libp2p::{ identity::Keypair, @@ -59,17 +57,16 @@ use libp2p::{ Multiaddr, PeerId, }; use rand::Rng; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress}; use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, storage::{RecordType, RetryStrategy}, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote, QuotingMetrics}; use 
std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeMap, HashMap}, net::IpAddr, - path::PathBuf, sync::Arc, }; use tokio::sync::{ @@ -79,7 +76,7 @@ use tokio::sync::{ use tokio::time::Duration; /// The type of quote for a selected payee. -pub type PayeeQuote = (PeerId, MainPubkey, PaymentQuote); +pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote); /// The count of peers that will be considered as close to a record target, /// that a replication of the record shall be sent/accepted to/by the peer. @@ -148,7 +145,7 @@ pub fn sort_peers_by_key<'a, T>( Ok(sorted_peers) } -#[derive(Clone)] +#[derive(Clone, Debug)] /// API to interact with the underlying Swarm pub struct Network { inner: Arc, @@ -156,11 +153,11 @@ pub struct Network { /// The actual implementation of the Network. The other is just a wrapper around this, so that we don't expose /// the Arc from the interface. +#[derive(Debug)] struct NetworkInner { network_swarm_cmd_sender: mpsc::Sender, local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, - root_dir_path: PathBuf, keypair: Keypair, } @@ -169,7 +166,6 @@ impl Network { network_swarm_cmd_sender: mpsc::Sender, local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, - root_dir_path: PathBuf, keypair: Keypair, ) -> Self { Self { @@ -177,7 +173,6 @@ impl Network { network_swarm_cmd_sender, local_swarm_cmd_sender, peer_id, - root_dir_path, keypair, }), } @@ -193,11 +188,6 @@ impl Network { &self.inner.keypair } - /// Returns the root directory path of the instance. - pub fn root_dir_path(&self) -> &PathBuf { - &self.inner.root_dir_path - } - /// Get the sender to send a `NetworkSwarmCmd` to the underlying `Swarm`. pub(crate) fn network_swarm_cmd_sender(&self) -> &mpsc::Sender { &self.inner.network_swarm_cmd_sender @@ -346,8 +336,8 @@ impl Network { /// Get the store costs from the majority of the closest peers to the provided RecordKey. /// Record already exists will have a cost of zero to be returned. 
/// - /// Ignore the quote from any peers from `ignore_peers`. This is useful if we want to repay a different PeerId - /// on failure. + /// Ignore the quote from any peers from `ignore_peers`. + /// This is useful if we want to repay a different PeerId on failure. pub async fn get_store_costs_from_network( &self, record_address: NetworkAddress, @@ -355,7 +345,14 @@ impl Network { ) -> Result { // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. - let close_nodes = self.get_closest_peers(&record_address, true).await?; + let mut close_nodes = self.get_closest_peers(&record_address, true).await?; + // Filter out results from the ignored peers. + close_nodes.retain(|peer_id| !ignore_peers.contains(peer_id)); + + if close_nodes.is_empty() { + error!("Cann't get store_cost of {record_address:?}, as all close_nodes are ignored"); + return Err(NetworkError::NoStoreCostResponses); + } let request = Request::Query(Query::GetStoreCost(record_address.clone())); let responses = self @@ -377,8 +374,10 @@ impl Network { peer_address, }) => { // Check the quote itself is valid. 
- if quote.cost.as_nano() - != calculate_cost_for_records(quote.quoting_metrics.close_records_stored) + if quote.cost + != AttoTokens::from_u64(calculate_cost_for_records( + quote.quoting_metrics.close_records_stored, + )) { warn!("Received invalid quote from {peer_address:?}, {quote:?}"); continue; @@ -409,27 +408,7 @@ impl Network { self.send_req_ignore_reply(request, *peer_id); } - // Sort all_costs by the NetworkAddress proximity to record_address - all_costs.sort_by(|(peer_address_a, _, _), (peer_address_b, _, _)| { - record_address - .distance(peer_address_a) - .cmp(&record_address.distance(peer_address_b)) - }); - let ignore_peers = ignore_peers - .into_iter() - .map(NetworkAddress::from_peer) - .collect::>(); - - // Ensure we dont have any further out nodes than `close_group_majority()` - // This should ensure that if we didnt get all responses from close nodes, - // we're less likely to be paying a node that is not in the CLOSE_GROUP - // - // Also filter out the peers. - let all_costs = all_costs - .into_iter() - .filter(|(peer_address, ..)| !ignore_peers.contains(peer_address)) - .take(close_group_majority()) - .collect(); + filter_out_bad_nodes(&mut all_costs, record_address); get_fees_from_store_cost_responses(all_costs) } @@ -521,7 +500,7 @@ impl Network { ) -> Result { let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( - ExponentialBackoff { + backoff::ExponentialBackoff { // None sets a random duration, but we'll be terminating with a BackoffError::Permanent, so retry will // be disabled. 
max_elapsed_time: retry_duration, @@ -539,7 +518,7 @@ impl Network { let result = receiver.await.map_err(|e| { error!("When fetching record {pretty_key:?}, encountered a channel error {e:?}"); NetworkError::InternalMsgChannelDropped - }).map_err(|err| BackoffError::Transient { err, retry_after: None })?; + }).map_err(|err| backoff::Error::Transient { err, retry_after: None })?; // log the results match &result { @@ -569,13 +548,13 @@ impl Network { // if we don't want to retry, throw permanent error if cfg.retry_strategy.is_none() { if let Err(e) = result { - return Err(BackoffError::Permanent(NetworkError::from(e))); + return Err(backoff::Error::Permanent(NetworkError::from(e))); } } if result.is_err() { debug!("Getting record from network of {pretty_key:?} via backoff..."); } - result.map_err(|err| BackoffError::Transient { + result.map_err(|err| backoff::Error::Transient { err: NetworkError::from(err), retry_after: None, }) @@ -588,7 +567,7 @@ impl Network { pub async fn get_local_storecost( &self, key: RecordKey, - ) -> Result<(NanoTokens, QuotingMetrics)> { + ) -> Result<(AttoTokens, QuotingMetrics, Vec)> { let (sender, receiver) = oneshot::channel(); self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); @@ -628,6 +607,18 @@ impl Network { /// Put `Record` to network /// Optionally verify the record is stored after putting it to network /// If verify is on, retry multiple times within MAX_PUT_RETRY_DURATION duration. + #[cfg(target_arch = "wasm32")] + pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { + let pretty_key = PrettyPrintRecordKey::from(&record.key); + + info!("Attempting to PUT record with key: {pretty_key:?} to network, with cfg {cfg:?}"); + self.put_record_once(record.clone(), cfg).await + } + + /// Put `Record` to network + /// Optionally verify the record is stored after putting it to network + /// If verify is on, retry multiple times within MAX_PUT_RETRY_DURATION duration. 
+ #[cfg(not(target_arch = "wasm32"))] pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { let pretty_key = PrettyPrintRecordKey::from(&record.key); @@ -635,7 +626,7 @@ impl Network { // So a long validation time will limit the number of PUT retries we attempt here. let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( - ExponentialBackoff { + backoff::ExponentialBackoff { // None sets a random duration, but we'll be terminating with a BackoffError::Permanent, so retry will // be disabled. max_elapsed_time: retry_duration, @@ -650,9 +641,9 @@ impl Network { warn!("Failed to PUT record with key: {pretty_key:?} to network (retry via backoff) with error: {err:?}"); if cfg.retry_strategy.is_some() { - BackoffError::Transient { err, retry_after: None } + backoff::Error::Transient { err, retry_after: None } } else { - BackoffError::Permanent(err) + backoff::Error::Permanent(err) } }) @@ -960,7 +951,7 @@ impl Network { /// Given `all_costs` it will return the closest / lowest cost /// Closest requiring it to be within CLOSE_GROUP nodes fn get_fees_from_store_cost_responses( - all_costs: Vec<(NetworkAddress, MainPubkey, PaymentQuote)>, + all_costs: Vec<(NetworkAddress, RewardsAddress, PaymentQuote)>, ) -> Result { // Find the minimum cost using a linear scan with random tie break let mut rng = rand::thread_rng(); @@ -993,6 +984,32 @@ fn get_fees_from_store_cost_responses( Ok((payee_id, payee.1, payee.2)) } +/// According to the bad_nodes list collected via quotes, +/// candidate that received majority votes from others shall be ignored. 
+fn filter_out_bad_nodes( + all_costs: &mut Vec<(NetworkAddress, RewardsAddress, PaymentQuote)>, + record_address: NetworkAddress, +) { + let mut bad_node_votes: BTreeMap = BTreeMap::new(); + for (peer_addr, _reward_addr, quote) in all_costs.iter() { + let bad_nodes: Vec = match rmp_serde::from_slice("e.bad_nodes) { + Ok(bad_nodes) => bad_nodes, + Err(err) => { + error!("For record {record_address:?}, failed to recover bad_nodes from quote of {peer_addr:?} with error {err:?}"); + continue; + } + }; + for bad_node in bad_nodes { + let entry = bad_node_votes.entry(bad_node).or_default(); + *entry += 1; + } + } + all_costs.retain(|(peer_addr, _, _)| { + let entry = bad_node_votes.entry(peer_addr.clone()).or_default(); + *entry < close_group_majority() + }); +} + /// Get the value of the provided Quorum pub fn get_quorum_value(quorum: &Quorum) -> usize { match quorum { @@ -1113,7 +1130,7 @@ mod tests { use eyre::bail; use super::*; - use sn_transfers::PaymentQuote; + use sn_evm::PaymentQuote; #[test] fn test_get_fee_from_store_cost_responses() -> Result<()> { @@ -1121,18 +1138,18 @@ mod tests { // ensure we return the CLOSE_GROUP / 2 indexed price let mut costs = vec![]; for i in 1..CLOSE_GROUP_SIZE { - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i as u64)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i as u64)), )); } - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = get_fees_from_store_cost_responses(costs)?; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); @@ -1147,18 +1164,18 @@ mod tests { let responses_count = CLOSE_GROUP_SIZE as u64 - 1; let mut costs = vec![]; for i in 1..responses_count { - // push 
random MainPubkey and Nano - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + // push random addr and Nano + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i)), )); println!("price added {i}"); } // this should be the lowest price - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = match get_fees_from_store_cost_responses(costs) { Err(_) => bail!("Should not have errored as we have enough responses"), @@ -1166,7 +1183,7 @@ mod tests { }; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); @@ -1177,8 +1194,7 @@ mod tests { #[test] fn test_network_sign_verify() -> eyre::Result<()> { let (network, _, _) = - NetworkBuilder::new(Keypair::generate_ed25519(), false, std::env::temp_dir()) - .build_client()?; + NetworkBuilder::new(Keypair::generate_ed25519(), false).build_client()?; let msg = b"test message"; let sig = network.sign(msg)?; assert!(network.verify(msg, &sig)); diff --git a/sn_networking/src/log_markers.rs b/sn_networking/src/log_markers.rs index 97ecb6c04b..38ec42c875 100644 --- a/sn_networking/src/log_markers.rs +++ b/sn_networking/src/log_markers.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use libp2p::PeerId; -use sn_transfers::QuotingMetrics; +use sn_evm::{Amount, QuotingMetrics}; // this gets us to_string easily enough use strum::Display; @@ -22,7 +22,7 @@ pub enum Marker<'a> { /// Store cost StoreCost { /// Cost - cost: u64, + cost: Amount, quoting_metrics: &'a QuotingMetrics, }, /// The peer has been considered as bad diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs new file mode 100644 index 0000000000..7b64e248ec --- /dev/null +++ b/sn_networking/src/metrics/bad_node.rs @@ -0,0 +1,621 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::target_arch::interval; +use libp2p::PeerId; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{family::Family, gauge::Gauge}, +}; +use sn_protocol::CLOSE_GROUP_SIZE; +use std::{ + collections::{HashSet, VecDeque}, + time::{Duration, Instant}, +}; +use strum::IntoEnumIterator; + +const UPDATE_INTERVAL: Duration = Duration::from_secs(20); + +#[cfg(not(test))] +const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = 5 * CLOSE_GROUP_SIZE; +#[cfg(test)] +const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = CLOSE_GROUP_SIZE + 2; + +pub struct BadNodeMetrics { + shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames, + shunned_by_close_group: ShunnedByCloseGroup, +} + +pub enum BadNodeMetricsMsg { + ShunnedByPeer(PeerId), + CloseGroupUpdated(Vec), +} + +struct ShunnedByCloseGroup { + metric_current_group: Gauge, + metric_old_group: Gauge, + + // trackers + close_group_peers: Vec, + old_close_group_peers: VecDeque, + old_new_group_shunned_list: HashSet, +} + +/// A struct to record the the number of reports against our node across different time frames. 
+struct ShunnedCountAcrossTimeFrames { + metric: Family, + shunned_report_tracker: Vec, +} + +struct ShunnedReportTracker { + time: Instant, + least_bucket_it_fits_in: TimeFrameType, +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelSet)] +pub struct TimeFrame { + time_frame: TimeFrameType, +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, EncodeLabelValue, strum::EnumIter)] +pub enum TimeFrameType { + LastTenMinutes, + LastHour, + LastSixHours, + LastDay, + LastWeek, + Indefinite, +} + +impl TimeFrameType { + #[cfg(not(test))] + fn get_duration_sec(&self) -> u64 { + match self { + TimeFrameType::LastTenMinutes => 10 * 60, + TimeFrameType::LastHour => 60 * 60, + TimeFrameType::LastSixHours => 6 * 60 * 60, + TimeFrameType::LastDay => 24 * 60 * 60, + TimeFrameType::LastWeek => 7 * 24 * 60 * 60, + TimeFrameType::Indefinite => u64::MAX, + } + } + + #[cfg(test)] + fn get_duration_sec(&self) -> u64 { + match self { + TimeFrameType::LastTenMinutes => 2, + TimeFrameType::LastHour => 4, + TimeFrameType::LastSixHours => 6, + TimeFrameType::LastDay => 8, + TimeFrameType::LastWeek => 10, + TimeFrameType::Indefinite => u64::MAX, + } + } + + fn next_time_frame(&self) -> Self { + match self { + TimeFrameType::LastTenMinutes => TimeFrameType::LastHour, + TimeFrameType::LastHour => TimeFrameType::LastSixHours, + TimeFrameType::LastSixHours => TimeFrameType::LastDay, + TimeFrameType::LastDay => TimeFrameType::LastWeek, + TimeFrameType::LastWeek => TimeFrameType::Indefinite, + TimeFrameType::Indefinite => TimeFrameType::Indefinite, + } + } +} + +impl BadNodeMetrics { + pub fn spawn_background_task( + time_based_shunned_count: Family, + shunned_by_close_group: Gauge, + shunned_by_old_close_group: Gauge, + ) -> tokio::sync::mpsc::Sender { + let mut bad_node_metrics = BadNodeMetrics { + shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames { + metric: time_based_shunned_count, + shunned_report_tracker: Vec::new(), + }, + shunned_by_close_group: 
ShunnedByCloseGroup { + metric_current_group: shunned_by_close_group, + metric_old_group: shunned_by_old_close_group, + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + // Shunned by old or new close group + old_new_group_shunned_list: HashSet::new(), + }, + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(10); + tokio::spawn(async move { + let mut update_interval = interval(UPDATE_INTERVAL); + update_interval.tick().await; + + loop { + tokio::select! { + msg = rx.recv() => { + match msg { + Some(BadNodeMetricsMsg::ShunnedByPeer(peer)) => { + bad_node_metrics.shunned_count_across_time_frames.record_shunned_metric(); + bad_node_metrics.shunned_by_close_group.record_shunned_metric(peer); + + } + Some(BadNodeMetricsMsg::CloseGroupUpdated(new_closest_peers)) => { + bad_node_metrics.shunned_by_close_group.update_close_group_peers(new_closest_peers); + } + None => break, + } + + + } + _ = update_interval.tick() => { + bad_node_metrics.shunned_count_across_time_frames.try_update_state(); + } + } + } + }); + tx + } +} + +impl ShunnedByCloseGroup { + pub(crate) fn record_shunned_metric(&mut self, peer: PeerId) { + // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before + if !self.old_new_group_shunned_list.contains(&peer) { + if self.close_group_peers.contains(&peer) { + self.metric_current_group.inc(); + self.old_new_group_shunned_list.insert(peer); + } else if self.old_close_group_peers.contains(&peer) { + self.metric_old_group.inc(); + self.old_new_group_shunned_list.insert(peer); + } + } + } + + pub(crate) fn update_close_group_peers(&mut self, new_closest_peers: Vec) { + let new_members: Vec = new_closest_peers + .iter() + .filter(|p| !self.close_group_peers.contains(p)) + .cloned() + .collect(); + let evicted_members: Vec = self + .close_group_peers + .iter() + .filter(|p| !new_closest_peers.contains(p)) + .cloned() + .collect(); + for new_member in &new_members { + // if it has shunned us 
before, update the metrics. + if self.old_new_group_shunned_list.contains(new_member) { + self.metric_old_group.dec(); + self.metric_current_group.inc(); + } + } + + for evicted_member in &evicted_members { + self.old_close_group_peers.push_back(*evicted_member); + // if it has shunned us before, update the metrics. + if self.old_new_group_shunned_list.contains(evicted_member) { + self.metric_current_group.dec(); + self.metric_old_group.inc(); + } + } + + if !new_members.is_empty() { + debug!("The close group has been updated. The new members are {new_members:?}. The evicted members are {evicted_members:?}"); + self.close_group_peers = new_closest_peers; + + while self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { + if let Some(removed_peer) = self.old_close_group_peers.pop_front() { + if self.old_new_group_shunned_list.remove(&removed_peer) { + self.metric_old_group.dec(); + } + } + } + } + } +} + +impl ShunnedCountAcrossTimeFrames { + fn record_shunned_metric(&mut self) { + let now = Instant::now(); + self.shunned_report_tracker.push(ShunnedReportTracker { + time: now, + least_bucket_it_fits_in: TimeFrameType::LastTenMinutes, + }); + + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + self.metric.get_or_create(&time_frame).inc(); + } + } + + fn try_update_state(&mut self) { + let now = Instant::now(); + let mut idx_to_remove = Vec::new(); + + for (idx, tracked_value) in self.shunned_report_tracker.iter_mut().enumerate() { + let time_elapsed_since_adding = now.duration_since(tracked_value.time).as_secs(); + + if time_elapsed_since_adding > tracked_value.least_bucket_it_fits_in.get_duration_sec() + { + let time_frame = TimeFrame { + time_frame: tracked_value.least_bucket_it_fits_in, + }; + self.metric.get_or_create(&time_frame).dec(); + + let new_time_frame = tracked_value.least_bucket_it_fits_in.next_time_frame(); + if new_time_frame == TimeFrameType::Indefinite { + idx_to_remove.push(idx); + } else 
{ + tracked_value.least_bucket_it_fits_in = new_time_frame; + } + } + } + // remove the ones that are now indefinite + for idx in idx_to_remove { + self.shunned_report_tracker.remove(idx); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use eyre::Result; + + #[test] + fn update_should_move_to_next_timeframe() -> Result<()> { + let mut shunned_metrics = ShunnedCountAcrossTimeFrames { + metric: Family::default(), + shunned_report_tracker: Vec::new(), + }; + shunned_metrics.record_shunned_metric(); + + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastTenMinutes)); + // all the counters should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastHour)); + // all the counters except LastTenMinutes should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. 
Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastSixHours)); + // all the counters except LastTenMinutes and LastHour should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes || variant == TimeFrameType::LastHour { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastDay)); + // all the counters except LastTenMinutes, LastHour and LastSixHours should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes + || variant == TimeFrameType::LastHour + || variant == TimeFrameType::LastSixHours + { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. 
Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastWeek)); + // all the counters except LastTenMinutes, LastHour, LastSixHours and LastDay should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes + || variant == TimeFrameType::LastHour + || variant == TimeFrameType::LastSixHours + || variant == TimeFrameType::LastDay + { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.try_update_state(); + assert_eq!(shunned_metrics.shunned_report_tracker.len(), 0); + // all the counters except Indefinite should be 0 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::Indefinite { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } + } + + Ok(()) + } + + #[test] + fn metrics_should_not_be_updated_if_close_group_is_not_set() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + + close_group_shunned.record_shunned_metric(PeerId::random()); + 
assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn close_group_shunned_metric_should_be_updated_on_new_report() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + // report by a peer in the close group should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by same peer should not increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a different peer should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[1]); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a peer that is not in the close group should not increment the metric + close_group_shunned.record_shunned_metric(PeerId::random()); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn change_in_close_group_should_update_the_metrics() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + 
close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + let old_member = close_group_shunned.close_group_peers[0]; + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // update close group + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the peer that shunned us before should now be in the old group + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // report by the old member should not increment the metric + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // update close group with old member + close_group_shunned.update_close_group_peers(vec![ + old_member, + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the metrics of current_group and old_group should be updated + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn update_close_group_metrics_on_reaching_max_evicted_peer_count() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + 
close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // evict 1 members + let old_member_1 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // evict 1 members + let old_member_2 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // report by the evicted members should increment the old group metric + close_group_shunned.record_shunned_metric(old_member_1); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + close_group_shunned.record_shunned_metric(old_member_2); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict all the members + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // the metrics should still remain the same + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict 1 more members to cross the threshold + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + 
close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_1 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + assert!(!close_group_shunned + .old_close_group_peers + .contains(&old_member_1)); + assert!(close_group_shunned + .old_close_group_peers + .contains(&old_member_2)); + + // evict 1 more member + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_2 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + assert!(!close_group_shunned + .old_close_group_peers + .contains(&old_member_1)); + + Ok(()) + } +} diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a7fdfbeee1..feb48bafd6 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -6,21 +6,26 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use crate::{log_markers::Marker, target_arch::sleep}; -use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; +// Implementation to record `libp2p::upnp::Event` metrics +mod bad_node; +pub mod service; #[cfg(feature = "upnp")] -use prometheus_client::metrics::family::Family; +mod upnp; + +use crate::MetricsRegistries; +use crate::{log_markers::Marker, target_arch::sleep}; +use bad_node::{BadNodeMetrics, BadNodeMetricsMsg, TimeFrame}; +use libp2p::{ + metrics::{Metrics as Libp2pMetrics, Recorder}, + PeerId, +}; use prometheus_client::{ + metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, - registry::Registry, }; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; -// Implementation to record `libp2p::upnp::Event` metrics -#[cfg(feature = "upnp")] -mod upnp; - const UPDATE_INTERVAL: Duration = Duration::from_secs(15); const TO_MB: u64 = 1_000_000; @@ -50,16 +55,29 @@ pub(crate) struct NetworkMetricsRecorder { // bad node metrics bad_peers_count: Counter, shunned_count: Counter, + #[allow(dead_code)] // updated by background task + shunned_count_across_time_frames: Family, + #[allow(dead_code)] + shunned_by_close_group: Gauge, + #[allow(dead_code)] + shunned_by_old_close_group: Gauge, // system info process_memory_used_mb: Gauge, process_cpu_usage_percentage: Gauge, + + // helpers + bad_nodes_notifier: tokio::sync::mpsc::Sender, } impl NetworkMetricsRecorder { - pub fn new(registry: &mut Registry) -> Self { - let libp2p_metrics = Libp2pMetrics::new(registry); - let sub_registry = registry.sub_registry_with_prefix("sn_networking"); + pub fn new(registries: &mut MetricsRegistries) -> Self { + // ==== Standard metrics ===== + + let libp2p_metrics = Libp2pMetrics::new(&mut registries.standard_metrics); + let sub_registry = registries + .standard_metrics + .sub_registry_with_prefix("sn_networking"); let records_stored = Gauge::default(); sub_registry.register( @@ -163,6 +181,37 @@ impl NetworkMetricsRecorder { live_time.clone(), 
); + let shunned_by_close_group = Gauge::default(); + sub_registry.register( + "shunned_by_close_group", + "The number of close group peers that have shunned our node", + shunned_by_close_group.clone(), + ); + + let shunned_by_old_close_group = Gauge::default(); + sub_registry.register( + "shunned_by_old_close_group", + "The number of close group peers that have shunned our node. This contains the peers that were once in our close group but have since been evicted.", + shunned_by_old_close_group.clone(), + ); + + // ==== Extended metrics ===== + + let extended_metrics_sub_registry = registries + .extended_metrics + .sub_registry_with_prefix("sn_networking"); + let shunned_count_across_time_frames = Family::default(); + extended_metrics_sub_registry.register( + "shunned_count_across_time_frames", + "The number of times our node has been shunned by other nodes across different time frames", + shunned_count_across_time_frames.clone(), + ); + + let bad_nodes_notifier = BadNodeMetrics::spawn_background_task( + shunned_count_across_time_frames.clone(), + shunned_by_close_group.clone(), + shunned_by_old_close_group.clone(), + ); let network_metrics = Self { libp2p_metrics, #[cfg(feature = "upnp")] @@ -180,10 +229,15 @@ impl NetworkMetricsRecorder { live_time, bad_peers_count, + shunned_count_across_time_frames, shunned_count, + shunned_by_close_group, + shunned_by_old_close_group, process_memory_used_mb, process_cpu_usage_percentage, + + bad_nodes_notifier, }; network_metrics.system_metrics_recorder_task(); @@ -225,14 +279,30 @@ impl NetworkMetricsRecorder { Marker::PeerConsideredAsBad { .. } => { let _ = self.bad_peers_count.inc(); } - Marker::FlaggedAsBadNode { .. 
} => { + Marker::FlaggedAsBadNode { flagged_by } => { let _ = self.shunned_count.inc(); + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + let flagged_by = *flagged_by; + crate::target_arch::spawn(async move { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::ShunnedByPeer(flagged_by)) + .await + { + error!("Failed to send shunned report via notifier: {err:?}"); + } + }); } Marker::StoreCost { cost, quoting_metrics, } => { - let _ = self.store_cost.set(cost as i64); + let _ = self.store_cost.set(cost.try_into().unwrap_or(i64::MAX)); + let _ = self.relevant_records.set( + quoting_metrics + .close_records_stored + .try_into() + .unwrap_or(i64::MAX), + ); let _ = self .relevant_records .set(quoting_metrics.close_records_stored as i64); @@ -245,6 +315,18 @@ impl NetworkMetricsRecorder { _ => {} } } + + pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec) { + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + crate::target_arch::spawn(async move { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::CloseGroupUpdated(new_close_group)) + .await + { + error!("Failed to send shunned report via notifier: {err:?}"); + } + }); + } } /// Impl the Recorder traits again for our struct. diff --git a/sn_networking/src/metrics_service.rs b/sn_networking/src/metrics/service.rs similarity index 57% rename from sn_networking/src/metrics_service.rs rename to sn_networking/src/metrics/service.rs index 4d8e0a165f..e64ae01701 100644 --- a/sn_networking/src/metrics_service.rs +++ b/sn_networking/src/metrics/service.rs @@ -16,22 +16,27 @@ use std::{ task::{Context, Poll}, }; +/// The types of metrics that are exposed via the various endpoints. 
+#[derive(Default, Debug)] +pub struct MetricsRegistries { + pub standard_metrics: Registry, + pub extended_metrics: Registry, + pub metadata: Registry, +} + const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; -pub(crate) fn run_metrics_server( - metrics_registry: Registry, - metadata_registry: Registry, - port: u16, -) { +pub(crate) fn run_metrics_server(registries: MetricsRegistries, port: u16) { // todo: containers don't work with localhost. let addr = ([127, 0, 0, 1], port).into(); tokio::spawn(async move { - let server = - Server::bind(&addr).serve(MakeMetricService::new(metrics_registry, metadata_registry)); + let server = Server::bind(&addr).serve(MakeMetricService::new(registries)); + // keep these for programs that might be grepping this info info!("Metrics server on http://{}/metrics", server.local_addr()); - info!("Metadata server on http://{}/metadata", server.local_addr()); println!("Metrics server on http://{}/metrics", server.local_addr()); + + info!("Metrics server on http://{} Available endpoints: /metrics, /metrics_extended, /metadata", server.local_addr()); // run the server forever if let Err(e) = server.await { error!("server error: {}", e); @@ -42,17 +47,22 @@ pub(crate) fn run_metrics_server( type SharedRegistry = Arc>; pub(crate) struct MetricService { - metrics_registry: SharedRegistry, - metadata_registry: SharedRegistry, + standard_registry: SharedRegistry, + extended_registry: SharedRegistry, + metadata: SharedRegistry, } impl MetricService { - fn get_metrics_registry(&mut self) -> SharedRegistry { - Arc::clone(&self.metrics_registry) + fn get_standard_metrics_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.standard_registry) + } + + fn get_extended_metrics_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.extended_registry) } fn get_metadata_registry(&mut self) -> SharedRegistry { - Arc::clone(&self.metadata_registry) + Arc::clone(&self.metadata) } fn 
respond_with_metrics(&mut self) -> Result> { @@ -65,10 +75,52 @@ impl MetricService { .map_err(|_| NetworkError::NetworkMetricError)?, ); - let reg = self.get_metrics_registry(); + let reg = self.get_standard_metrics_registry(); let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; encode(&mut response.body_mut(), ®).map_err(|err| { - error!("Failed to encode the metrics Registry {err:?}"); + error!("Failed to encode the standard metrics Registry {err:?}"); + NetworkError::NetworkMetricError + })?; + + *response.status_mut() = StatusCode::OK; + + Ok(response) + } + + fn respond_with_metrics_extended(&mut self) -> Result> { + let mut response: Response = Response::default(); + + response.headers_mut().insert( + hyper::header::CONTENT_TYPE, + METRICS_CONTENT_TYPE + .try_into() + .map_err(|_| NetworkError::NetworkMetricError)?, + ); + + let standard_registry = self.get_standard_metrics_registry(); + let standard_registry = standard_registry + .lock() + .map_err(|_| NetworkError::NetworkMetricError)?; + encode(&mut response.body_mut(), &standard_registry).map_err(|err| { + error!("Failed to encode the standard metrics Registry {err:?}"); + NetworkError::NetworkMetricError + })?; + + // remove the EOF line from the response + let mut buffer = response.body().split("\n").collect::>(); + let _ = buffer.pop(); + let _ = buffer.pop(); + buffer.push("\n"); + let mut buffer = buffer.join("\n"); + let _ = buffer.pop(); + *response.body_mut() = buffer; + + let extended_registry = self.get_extended_metrics_registry(); + let extended_registry = extended_registry + .lock() + .map_err(|_| NetworkError::NetworkMetricError)?; + encode(&mut response.body_mut(), &extended_registry).map_err(|err| { + error!("Failed to encode the standard metrics Registry {err:?}"); NetworkError::NetworkMetricError })?; @@ -91,7 +143,7 @@ impl MetricService { let reg = self.get_metadata_registry(); let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; encode(&mut 
response.body_mut(), ®).map_err(|err| { - error!("Failed to encode the metrics Registry {err:?}"); + error!("Failed to encode the metadata Registry {err:?}"); NetworkError::NetworkMetricError })?; @@ -133,6 +185,12 @@ impl Service> for MetricService { Ok(resp) => resp, Err(_) => self.respond_with_500_server_error(), } + } else if req_method == Method::GET && req_path == "/metrics_extended" { + // Encode and serve metrics from registry. + match self.respond_with_metrics_extended() { + Ok(resp) => resp, + Err(_) => self.respond_with_500_server_error(), + } } else if req_method == Method::GET && req_path == "/metadata" { match self.respond_with_metadata() { Ok(resp) => resp, @@ -146,18 +204,17 @@ impl Service> for MetricService { } pub(crate) struct MakeMetricService { - metrics_registry: SharedRegistry, - metadata_registry: SharedRegistry, + standard_registry: SharedRegistry, + extended_registry: SharedRegistry, + metadata: SharedRegistry, } impl MakeMetricService { - pub(crate) fn new( - metrics_registry: Registry, - metadata_registry: Registry, - ) -> MakeMetricService { + pub(crate) fn new(registries: MetricsRegistries) -> MakeMetricService { MakeMetricService { - metrics_registry: Arc::new(Mutex::new(metrics_registry)), - metadata_registry: Arc::new(Mutex::new(metadata_registry)), + standard_registry: Arc::new(Mutex::new(registries.standard_metrics)), + extended_registry: Arc::new(Mutex::new(registries.extended_metrics)), + metadata: Arc::new(Mutex::new(registries.metadata)), } } } @@ -172,12 +229,15 @@ impl Service for MakeMetricService { } fn call(&mut self, _: T) -> Self::Future { - let metrics_registry = Arc::clone(&self.metrics_registry); - let metadata_registry = Arc::clone(&self.metadata_registry); + let standard_registry = Arc::clone(&self.standard_registry); + let extended_registry = Arc::clone(&self.extended_registry); + let metadata = Arc::clone(&self.metadata); + let fut = async move { Ok(MetricService { - metrics_registry, - metadata_registry, + 
standard_registry, + extended_registry, + metadata, }) }; Box::pin(fut) diff --git a/sn_networking/src/metrics/upnp.rs b/sn_networking/src/metrics/upnp.rs index 9dd3b923b7..593e7eaeab 100644 --- a/sn_networking/src/metrics/upnp.rs +++ b/sn_networking/src/metrics/upnp.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; #[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelSet)] diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 17308f196f..ee4e413c5e 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -30,11 +30,11 @@ use prometheus_client::metrics::gauge::Gauge; use rand::RngCore; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{ storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::collections::VecDeque; use std::{ borrow::Cow, @@ -57,7 +57,7 @@ use xor_name::XorName; const MAX_RECORDS_COUNT: usize = 16 * 1024; /// The maximum number of records to cache in memory. -const MAX_RECORDS_CACHE_SIZE: usize = 100; +const MAX_RECORDS_CACHE_SIZE: usize = 25; /// File name of the recorded historical quoting metrics. 
const HISTORICAL_QUOTING_METRICS_FILENAME: &str = "historic_quoting_metrics"; @@ -461,7 +461,7 @@ impl NodeRecordStore { // result in mis-calculation of relevant records. pub fn cleanup_unrelevant_records(&mut self) { let accumulated_records = self.records.len(); - if accumulated_records < 6 * MAX_RECORDS_COUNT / 10 { + if accumulated_records < MAX_RECORDS_COUNT * 6 / 10 { return; } @@ -651,7 +651,7 @@ impl NodeRecordStore { } /// Calculate the cost to store data for our current store state - pub(crate) fn store_cost(&self, key: &Key) -> (NanoTokens, QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &Key) -> (AttoTokens, QuotingMetrics) { let records_stored = self.records.len(); let record_keys_as_hashset: HashSet<&Key> = self.records.keys().collect(); @@ -685,7 +685,7 @@ impl NodeRecordStore { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): info!("Cost is now {cost:?} for quoting_metrics {quoting_metrics:?}"); - (NanoTokens::from(cost), quoting_metrics) + (AttoTokens::from_u64(cost), quoting_metrics) } /// Notify the node received a payment. 
@@ -955,16 +955,20 @@ mod tests { use super::*; use bls::SecretKey; + use xor_name::XorName; + use bytes::Bytes; use eyre::{bail, ContextCompat}; use libp2p::kad::K_VALUE; use libp2p::{core::multihash::Multihash, kad::RecordKey}; use quickcheck::*; - use sn_transfers::{MainPubkey, PaymentQuote}; + use sn_evm::utils::dummy_address; + use sn_evm::{PaymentQuote, RewardsAddress}; + use sn_protocol::storage::{ + try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, Scratchpad, + }; use std::collections::BTreeMap; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; - - use sn_protocol::storage::{try_serialize_record, ChunkAddress}; use tokio::runtime::Runtime; use tokio::time::{sleep, Duration}; @@ -1153,6 +1157,152 @@ mod tests { assert!(store.get(&r.key).is_none()); } + #[tokio::test] + async fn can_store_and_retrieve_chunk() { + let temp_dir = std::env::temp_dir(); + let store_config = NodeRecordStoreConfig { + storage_dir: temp_dir, + ..Default::default() + }; + let self_id = PeerId::random(); + let (network_event_sender, _) = mpsc::channel(1); + let (swarm_cmd_sender, _) = mpsc::channel(1); + + let mut store = NodeRecordStore::with_config( + self_id, + store_config, + network_event_sender, + swarm_cmd_sender, + ); + + // Create a chunk + let chunk_data = Bytes::from_static(b"Test chunk data"); + let chunk = Chunk::new(chunk_data.clone()); + let chunk_address = *chunk.address(); + + // Create a record from the chunk + let record = Record { + key: NetworkAddress::ChunkAddress(chunk_address).to_record_key(), + value: chunk_data.to_vec(), + expires: None, + publisher: None, + }; + + // Store the chunk using put_verified + assert!(store + .put_verified(record.clone(), RecordType::Chunk) + .is_ok()); + + // Mark as stored (simulating the CompletedWrite event) + store.mark_as_stored(record.key.clone(), RecordType::Chunk); + + // Verify the chunk is stored + let stored_record = store.get(&record.key); + assert!(stored_record.is_some(), "Chunk should be 
stored"); + + if let Some(stored) = stored_record { + assert_eq!( + stored.value, chunk_data, + "Stored chunk data should match original" + ); + + let stored_address = ChunkAddress::new(XorName::from_content(&stored.value)); + assert_eq!( + stored_address, chunk_address, + "Stored chunk address should match original" + ); + } + + // Clean up + store.remove(&record.key); + assert!( + store.get(&record.key).is_none(), + "Chunk should be removed after cleanup" + ); + } + + #[tokio::test] + async fn can_store_and_retrieve_scratchpad() -> eyre::Result<()> { + let temp_dir = std::env::temp_dir(); + let store_config = NodeRecordStoreConfig { + storage_dir: temp_dir, + ..Default::default() + }; + let self_id = PeerId::random(); + let (network_event_sender, _) = mpsc::channel(1); + let (swarm_cmd_sender, _) = mpsc::channel(1); + + let mut store = NodeRecordStore::with_config( + self_id, + store_config, + network_event_sender, + swarm_cmd_sender, + ); + + // Create a scratchpad + let unencrypted_scratchpad_data = Bytes::from_static(b"Test scratchpad data"); + let owner_sk = SecretKey::random(); + let owner_pk = owner_sk.public_key(); + + let mut scratchpad = Scratchpad::new(owner_pk); + + let _next_version = + scratchpad.update_and_sign(unencrypted_scratchpad_data.clone(), &owner_sk); + + let scratchpad_address = *scratchpad.address(); + + // Create a record from the scratchpad + let record = Record { + key: NetworkAddress::ScratchpadAddress(scratchpad_address).to_record_key(), + value: try_serialize_record(&scratchpad, RecordKind::Scratchpad)?.to_vec(), + expires: None, + publisher: None, + }; + + // Store the scratchpad using put_verified + assert!(store + .put_verified( + record.clone(), + RecordType::NonChunk(XorName::from_content(&record.value)) + ) + .is_ok()); + + // Mark as stored (simulating the CompletedWrite event) + store.mark_as_stored( + record.key.clone(), + RecordType::NonChunk(XorName::from_content(&record.value)), + ); + + // Verify the scratchpad is stored 
+ let stored_record = store.get(&record.key); + assert!(stored_record.is_some(), "Scratchpad should be stored"); + + if let Some(stored) = stored_record { + let scratchpad = try_deserialize_record::(&stored)?; + + let stored_address = scratchpad.address(); + assert_eq!( + stored_address, &scratchpad_address, + "Stored scratchpad address should match original" + ); + + let decrypted_data = scratchpad.decrypt_data(&owner_sk)?; + + assert_eq!( + decrypted_data, + Some(unencrypted_scratchpad_data), + "Stored scratchpad data should match original" + ); + } + + store.remove(&record.key); + assert!( + store.get(&record.key).is_none(), + "Scratchpad should be removed after cleanup" + ); + + Ok(()) + } #[tokio::test] async fn pruning_on_full() -> Result<()> { let max_iterations = 10; @@ -1414,12 +1564,14 @@ mod tests { struct PeerStats { address: NetworkAddress, - pk: MainPubkey, + rewards_addr: RewardsAddress, records_stored: AtomicUsize, nanos_earned: AtomicU64, payments_received: AtomicUsize, } + // takes a long time to run + #[ignore] #[test] fn address_distribution_sim() { use rayon::prelude::*; @@ -1442,7 +1594,7 @@ mod tests { records_stored: AtomicUsize::new(0), nanos_earned: AtomicU64::new(0), payments_received: AtomicUsize::new(0), - pk: MainPubkey::new(SecretKey::random().public_key()), + rewards_addr: dummy_address(), }) .collect(); @@ -1508,8 +1660,10 @@ mod tests { peer.records_stored.fetch_add(1, Ordering::Relaxed); if peer_index == payee_index { - peer.nanos_earned - .fetch_add(cost.as_nano(), Ordering::Relaxed); + peer.nanos_earned.fetch_add( + cost.as_atto().try_into().unwrap_or(u64::MAX), + Ordering::Relaxed, + ); peer.payments_received.fetch_add(1, Ordering::Relaxed); } } @@ -1598,8 +1752,8 @@ mod tests { max_store_cost / min_store_cost ); assert!( - (max_earned / min_earned) < 300000000, - "earning distribution is not balanced, expected to be < 200000000, but was {}", + (max_earned / min_earned) < 500000000, + "earning distribution is not balanced, 
expected to be < 500000000, but was {}", max_earned / min_earned ); break; @@ -1610,7 +1764,7 @@ mod tests { fn pick_cheapest_payee( peers: &[PeerStats], close_group: &[usize], - ) -> eyre::Result<(usize, NanoTokens)> { + ) -> eyre::Result<(usize, AttoTokens)> { let mut costs_vec = Vec::with_capacity(close_group.len()); let mut address_to_index = BTreeMap::new(); @@ -1619,7 +1773,7 @@ mod tests { address_to_index.insert(peer.address.clone(), i); let close_records_stored = peer.records_stored.load(Ordering::Relaxed); - let cost = NanoTokens::from(calculate_cost_for_records(close_records_stored)); + let cost = AttoTokens::from(calculate_cost_for_records(close_records_stored)); let quote = PaymentQuote { content: XorName::default(), // unimportant for cost calc @@ -1631,11 +1785,13 @@ mod tests { received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, - pub_key: peer.pk.to_bytes().to_vec(), - signature: vec![], // unimportant for cost calc + bad_nodes: vec![], + pub_key: bls::SecretKey::random().public_key().to_bytes().to_vec(), + signature: vec![], + rewards_address: peer.rewards_addr, // unimportant for cost calc }; - costs_vec.push((peer.address.clone(), peer.pk, quote)); + costs_vec.push((peer.address.clone(), peer.rewards_addr, quote)); } // sort by address first diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index c61b8d7043..8e3bc67364 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -12,8 +12,8 @@ use libp2p::kad::{ store::{RecordStore, Result}, ProviderRecord, Record, RecordKey, }; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::{borrow::Cow, collections::HashMap}; pub enum UnifiedRecordStore { @@ -111,11 +111,11 @@ impl UnifiedRecordStore { } } - pub(crate) fn store_cost(&self, key: &RecordKey) -> (NanoTokens, 
QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &RecordKey) -> (AttoTokens, QuotingMetrics) { match self { Self::Client(_) => { warn!("Calling store cost calculation at Client. This should not happen"); - (NanoTokens::zero(), Default::default()) + (AttoTokens::zero(), Default::default()) } Self::Node(store) => store.store_cost(key), } diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 29ba11976e..76b6349ce1 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -35,6 +35,7 @@ impl Network { // what we will have in hand. target_record: None, expected_holders: Default::default(), + is_register: false, }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( @@ -55,6 +56,7 @@ impl Network { retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), + is_register: false, }; let record = match self.get_record_from_network(key.clone(), &get_cfg).await { Ok(record) => record, diff --git a/sn_networking/src/transport/other.rs b/sn_networking/src/transport/other.rs index 78683ca15d..9143c27e63 100644 --- a/sn_networking/src/transport/other.rs +++ b/sn_networking/src/transport/other.rs @@ -1,7 +1,7 @@ +#[cfg(feature = "open-metrics")] +use crate::MetricsRegistries; #[cfg(feature = "websockets")] use futures::future::Either; -#[cfg(feature = "open-metrics")] -use libp2p::metrics::Registry; #[cfg(feature = "websockets")] use libp2p::{core::upgrade, noise, yamux}; use libp2p::{ @@ -12,11 +12,11 @@ use libp2p::{ pub(crate) fn build_transport( keypair: &Keypair, - #[cfg(feature = "open-metrics")] registry: &mut Registry, + #[cfg(feature = "open-metrics")] registries: &mut MetricsRegistries, ) -> transport::Boxed<(PeerId, StreamMuxerBox)> { let trans = generate_quic_transport(keypair); #[cfg(feature = "open-metrics")] - let trans = libp2p::metrics::BandwidthTransport::new(trans, registry); + let trans = 
libp2p::metrics::BandwidthTransport::new(trans, &mut registries.standard_metrics); #[cfg(feature = "websockets")] // Using a closure here due to the complex return type diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 2cd71e7a4b..85619de0b5 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.111.4" +version = "0.112.0" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -14,15 +14,16 @@ name = "safenode" path = "src/bin/safenode/main.rs" [features] -default = ["metrics", "upnp", "reward-forward", "open-metrics"] -local-discovery = ["sn_networking/local-discovery"] +default = ["metrics", "upnp", "open-metrics", "encrypt-records"] +local = ["sn_networking/local", "test_utils/local", "sn_evm/local"] otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] +nightly = [] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] encrypt-records = ["sn_networking/encrypt-records"] upnp = ["sn_networking/upnp"] -reward-forward = ["sn_transfers/reward-forward"] +loud = ["sn_networking/loud"] # loud mode: print important messages to console [dependencies] assert_fs = "1.0.0" @@ -33,13 +34,14 @@ clap = { version = "4.2.1", features = ["derive"] } crdts = { version = "7.3", default-features = false, features = ["merkle"] } chrono = "~0.4.19" custom_debug = "~0.6.1" +const-hex = "1.12.0" dirs-next = "~2.0.0" eyre = "0.6.8" file-rotate = "0.7.3" futures = "~0.3.13" hex = "~0.4.3" itertools = "~0.12.1" -libp2p = { version = "0.53", features = ["tokio", "dns", "kad", "macros"] } +libp2p = { version = "0.54.1", features = ["tokio", "dns", "kad", "macros"] } prometheus-client = { version = "0.22", optional = true } # watch out updating this, protoc compiler needs to be installed on all build systems # arm builds + musl are very problematic @@ -50,14 +52,16 @@ 
rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -79,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -assert_matches = "1.5.0" +evmlib = { path = "../evmlib", version = "0.1.1" } +autonomi = { path = "../autonomi", version = "0.2.0", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ +sn_protocol = { path = "../sn_protocol", version = 
"0.17.12", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.3", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.0", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node/examples/register_inspect.rs b/sn_node/examples/register_inspect.rs deleted file mode 100644 index 3c3d70a36b..0000000000 --- a/sn_node/examples/register_inspect.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crdts::merkle_reg::{Hash, MerkleReg, Node}; -use std::collections::HashMap; -use std::io; - -use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient}; -use sn_registers::{Entry, Permissions, RegisterAddress}; - -use xor_name::XorName; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{ - eyre::{eyre, Result, WrapErr}, - Help, -}; - -#[derive(Parser, Debug)] -#[clap(name = "register inspect cli")] -struct Opt { - // Create register and give it a nickname (first user) - #[clap(long, default_value = "")] - reg_nickname: String, - - // Get existing register with given network address (any other user) - #[clap(long, default_value = "", conflicts_with = "reg_nickname")] - reg_address: String, -} - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - let mut reg_nickname = opt.reg_nickname; - let reg_address_string = opt.reg_address; - - // let's build a random secret key to sign our Register ops - let signer = SecretKey::random(); - - 
println!("Starting SAFE client..."); - let client = Client::new(signer, None, None, None).await?; - println!("SAFE client signer public key: {:?}", client.signer_pk()); - - // The address of the register to be displayed - let mut meta = XorName::from_content(reg_nickname.as_bytes()); - let reg_address = if !reg_nickname.is_empty() { - meta = XorName::from_content(reg_nickname.as_bytes()); - RegisterAddress::new(meta, client.signer_pk()) - } else { - reg_nickname = format!("{reg_address_string:<6}..."); - RegisterAddress::from_hex(®_address_string) - .wrap_err("cannot parse hex register address")? - }; - - // Loading a local wallet (for ClientRegister::sync()). - // The wallet can have ZERO balance in this example, - // but the ClientRegister::sync() API requires a wallet and will - // create the register if not found even though we don't want that. - // - // The only want to avoid unwanted creation of a Register seems to - // be to supply an empty wallet. - // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308 - let root_dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe") - .join("client"); - - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .wrap_err("Unable to read wallet file in {root_dir:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. 
Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // Repeatedly display of the register structure on command - loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - if prompt_user() { - return Ok(()); - } - - // Sync with network after a delay - println!("Syncing with SAFE..."); - reg_replica.sync(&mut wallet_client, true, None).await?; - let merkle_reg = reg_replica.merkle_reg(); - let content = merkle_reg.read(); - println!("synced!"); - - // Show the Register structure - - // Index nodes to make it easier to see where a - // node appears multiple times in the output. - // Note: it isn't related to the order of insertion - // which is hard to determine. 
- let mut index: usize = 0; - let mut node_ordering: HashMap = HashMap::new(); - for (_hash, node) in content.hashes_and_nodes() { - index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg); - } - - println!("======================"); - println!("Root (Latest) Node(s):"); - for node in content.nodes() { - let _ = print_node(0, node, &node_ordering); - } - - println!("======================"); - println!("Register Structure:"); - println!("(In general, earlier nodes are more indented)"); - let mut indents = 0; - for (_hash, node) in content.hashes_and_nodes() { - print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg); - } - - println!("======================"); - } -} - -fn index_node_and_descendants( - node: &Node, - index: &mut usize, - node_ordering: &mut HashMap, - merkle_reg: &MerkleReg, -) { - let node_hash = node.hash(); - if node_ordering.get(&node_hash).is_none() { - node_ordering.insert(node_hash, *index); - *index += 1; - } - - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - index_node_and_descendants(child_node, index, node_ordering, merkle_reg); - } else { - println!("ERROR looking up hash of child"); - } - } -} - -fn print_node_and_descendants( - indents: &mut usize, - node: &Node, - node_ordering: &HashMap, - merkle_reg: &MerkleReg, -) { - let _ = print_node(*indents, node, node_ordering); - - *indents += 1; - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - print_node_and_descendants(indents, child_node, node_ordering, merkle_reg); - } - } - *indents -= 1; -} - -fn print_node( - indents: usize, - node: &Node, - node_ordering: &HashMap, -) -> Result<()> { - let order = match node_ordering.get(&node.hash()) { - Some(order) => format!("{order}"), - None => String::new(), - }; - let indentation = " ".repeat(indents); - println!( - "{indentation}[{:>2}] Node({:?}..) 
Entry({:?})", - order, - hex::encode(&node.hash()[0..3]), - String::from_utf8(node.value.clone())? - ); - Ok(()) -} - -fn prompt_user() -> bool { - let mut input_text = String::new(); - println!(); - println!("Enter a blank line to print the latest register structure (or 'Q' to quit)"); - io::stdin() - .read_line(&mut input_text) - .expect("Failed to read text from stdin"); - - let string = input_text.trim().to_string(); - - string.contains('Q') || string.contains('q') -} diff --git a/sn_node/examples/registers.rs b/sn_node/examples/registers.rs deleted file mode 100644 index 70d3177a1c..0000000000 --- a/sn_node/examples/registers.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient, -}; -use sn_registers::{Permissions, RegisterAddress}; - -use xor_name::XorName; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{ - eyre::{eyre, Result, WrapErr}, - Help, -}; -use std::{io, time::Duration}; -use tokio::time::sleep; - -#[derive(Parser, Debug)] -#[clap(name = "registers cli")] -struct Opt { - // A name for this user in the example - #[clap(long)] - user: String, - - // Create register and give it a nickname (first user) - #[clap(long, default_value = "")] - reg_nickname: String, - - // Get existing register with given network address (any other user) - #[clap(long, default_value = "", conflicts_with = "reg_nickname")] - reg_address: String, - - // Delay before synchronising local register with the network - #[clap(long, default_value_t = 2000)] - delay_millis: u64, -} - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - let user = opt.user; - let mut reg_nickname = opt.reg_nickname; - let reg_address_string = opt.reg_address; - let delay = Duration::from_millis(opt.delay_millis); - - // let's build a random secret key to sign our Register ops - let signer = SecretKey::random(); - - println!("Starting SAFE client..."); - let client = Client::new(signer, None, None, None).await?; - println!("SAFE client signer public key: {:?}", client.signer_pk()); - - // We'll retrieve (or create if not found) a Register, and write on it - // in offline mode, syncing with the network periodically. - - let mut meta = XorName::from_content(reg_nickname.as_bytes()); - let reg_address = if !reg_nickname.is_empty() { - meta = XorName::from_content(reg_nickname.as_bytes()); - RegisterAddress::new(meta, client.signer_pk()) - } else { - reg_nickname = format!("{reg_address_string:<6}..."); - RegisterAddress::from_hex(®_address_string) - .wrap_err("cannot parse hex register address")? - }; - - // Loading a local wallet. 
It needs to have a non-zero balance for - // this example to be able to pay for the Register's storage. - let root_dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe") - .join("client"); - - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .wrap_err("Unable to read wallet file in {root_dir:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. Try removing it", - )?; - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // We'll loop asking for new msg to write onto the Register offline, - // then we'll be syncing the offline Register with the network, i.e. - // both pushing and ulling all changes made to it by us and other clients/users. - // If we detect branches when trying to write, after we synced with remote - // replicas of the Register, we'll merge them all back into a single value. 
- loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - let input_text = prompt_user(); - if !input_text.is_empty() { - println!("Writing msg (offline) to Register: '{input_text}'"); - let msg = format!("[{user}]: {input_text}"); - match reg_replica.write(msg.as_bytes()) { - Ok(_) => {} - Err(Error::ContentBranchDetected(branches)) => { - println!( - "Branches ({}) detected in Register, let's merge them all...", - branches.len() - ); - reg_replica.write_merging_branches(msg.as_bytes())?; - } - Err(err) => return Err(err.into()), - } - } - - // Sync with network after a delay - println!("Syncing with SAFE in {delay:?}..."); - sleep(delay).await; - reg_replica.sync(&mut wallet_client, true, None).await?; - println!("synced!"); - } -} - -fn prompt_user() -> String { - let mut input_text = String::new(); - println!(); - println!("Enter a blank line to receive updates, or some text to be written."); - io::stdin() - .read_line(&mut input_text) - .expect("Failed to read text from stdin"); - - input_text.trim().to_string() -} diff --git a/sn_node/reactivate_examples/register_inspect.rs b/sn_node/reactivate_examples/register_inspect.rs new file mode 100644 index 0000000000..03f35ffa6e --- /dev/null +++ b/sn_node/reactivate_examples/register_inspect.rs @@ -0,0 +1,233 @@ +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. 
Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// use crdts::merkle_reg::{Hash, MerkleReg, Node}; +// use std::collections::HashMap; +// use std::io; + +// // TODO: use autonomi API here +// // use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient}; +// use sn_registers::{Entry, Permissions, RegisterAddress}; + +// use xor_name::XorName; + +// use bls::SecretKey; +// use clap::Parser; +// use color_eyre::{ +// eyre::{eyre, Result, WrapErr}, +// Help, +// }; + +// #[derive(Parser, Debug)] +// #[clap(name = "register inspect cli")] +// struct Opt { +// // Create register and give it a nickname (first user) +// #[clap(long, default_value = "")] +// reg_nickname: String, + +// // Get existing register with given network address (any other user) +// #[clap(long, default_value = "", conflicts_with = "reg_nickname")] +// reg_address: String, +// } + +// #[tokio::main] +// async fn main() -> Result<()> { +// let opt = Opt::parse(); +// let mut reg_nickname = opt.reg_nickname; +// let reg_address_string = opt.reg_address; + +// // let's build a random secret key to sign our Register ops +// let signer = SecretKey::random(); + +// println!("Starting SAFE client..."); +// let client = Client::new(signer, None, None, None).await?; +// println!("SAFE client signer public key: {:?}", client.signer_pk()); + +// // The address of the register to be displayed +// let mut meta = XorName::from_content(reg_nickname.as_bytes()); +// let reg_address = if !reg_nickname.is_empty() { +// meta = XorName::from_content(reg_nickname.as_bytes()); +// RegisterAddress::new(meta, client.signer_pk()) +// } else { +// reg_nickname = format!("{reg_address_string:<6}..."); +// RegisterAddress::from_hex(®_address_string) +// .wrap_err("cannot parse hex register address")? +// }; + +// // Loading a local wallet (for ClientRegister::sync()). 
+// // The wallet can have ZERO balance in this example, +// // but the ClientRegister::sync() API requires a wallet and will +// // create the register if not found even though we don't want that. +// // +// // The only want to avoid unwanted creation of a Register seems to +// // be to supply an empty wallet. +// // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308 +// let root_dir = dirs_next::data_dir() +// .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? +// .join("safe") +// .join("client"); + +// let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) +// .wrap_err(format!"Unable to read wallet file in {root_dir:?}")) +// .suggestion( +// "If you have an old wallet file, it may no longer be compatible. Try removing it", +// )?; + +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // Repeatedly display of the register structure on command +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// 
for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// if prompt_user() { +// return Ok(()); +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE..."); +// reg_replica.sync(&mut wallet_client, true, None).await?; +// let merkle_reg = reg_replica.merkle_reg(); +// let content = merkle_reg.read(); +// println!("synced!"); + +// // Show the Register structure + +// // Index nodes to make it easier to see where a +// // node appears multiple times in the output. +// // Note: it isn't related to the order of insertion +// // which is hard to determine. +// let mut index: usize = 0; +// let mut node_ordering: HashMap = HashMap::new(); +// for (_hash, node) in content.hashes_and_nodes() { +// index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg); +// } + +// println!("======================"); +// println!("Root (Latest) Node(s):"); +// for node in content.nodes() { +// let _ = print_node(0, node, &node_ordering); +// } + +// println!("======================"); +// println!("Register Structure:"); +// println!("(In general, earlier nodes are more indented)"); +// let mut indents = 0; +// for (_hash, node) in content.hashes_and_nodes() { +// print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg); +// } + +// println!("======================"); +// } +// } + +// fn index_node_and_descendants( +// node: &Node, +// index: &mut usize, +// node_ordering: &mut HashMap, +// merkle_reg: &MerkleReg, +// ) { +// let node_hash = node.hash(); +// if node_ordering.get(&node_hash).is_none() { +// node_ordering.insert(node_hash, *index); +// *index += 1; +// } + +// for child_hash in node.children.iter() { +// if let Some(child_node) = merkle_reg.node(*child_hash) { +// index_node_and_descendants(child_node, index, node_ordering, merkle_reg); +// } else { +// println!("ERROR looking up hash of child"); +// } +// } +// } + +// fn 
print_node_and_descendants( +// indents: &mut usize, +// node: &Node, +// node_ordering: &HashMap, +// merkle_reg: &MerkleReg, +// ) { +// let _ = print_node(*indents, node, node_ordering); + +// *indents += 1; +// for child_hash in node.children.iter() { +// if let Some(child_node) = merkle_reg.node(*child_hash) { +// print_node_and_descendants(indents, child_node, node_ordering, merkle_reg); +// } +// } +// *indents -= 1; +// } + +// fn print_node( +// indents: usize, +// node: &Node, +// node_ordering: &HashMap, +// ) -> Result<()> { +// let order = match node_ordering.get(&node.hash()) { +// Some(order) => format!("{order}"), +// None => String::new(), +// }; +// let indentation = " ".repeat(indents); +// println!( +// "{indentation}[{:>2}] Node({:?}..) Entry({:?})", +// order, +// hex::encode(&node.hash()[0..3]), +// String::from_utf8(node.value.clone())? +// ); +// Ok(()) +// } + +// fn prompt_user() -> bool { +// let mut input_text = String::new(); +// println!(); +// println!("Enter a blank line to print the latest register structure (or 'Q' to quit)"); +// io::stdin() +// .read_line(&mut input_text) +// .expect("Failed to read text from stdin"); + +// let string = input_text.trim().to_string(); + +// string.contains('Q') || string.contains('q') +// } diff --git a/sn_node/reactivate_examples/registers.rs b/sn_node/reactivate_examples/registers.rs new file mode 100644 index 0000000000..6fa6c51045 --- /dev/null +++ b/sn_node/reactivate_examples/registers.rs @@ -0,0 +1,167 @@ +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. 
Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// // TODO: use autonomi API here. +// // use sn_client::{ +// // acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient, +// // }; +// use sn_registers::{Permissions, RegisterAddress}; + +// use xor_name::XorName; + +// use bls::SecretKey; +// use clap::Parser; +// use color_eyre::{ +// eyre::{eyre, Result, WrapErr}, +// Help, +// }; +// use std::{io, time::Duration}; +// use tokio::time::sleep; + +// #[derive(Parser, Debug)] +// #[clap(name = "registers cli")] +// struct Opt { +// // A name for this user in the example +// #[clap(long)] +// user: String, + +// // Create register and give it a nickname (first user) +// #[clap(long, default_value = "")] +// reg_nickname: String, + +// // Get existing register with given network address (any other user) +// #[clap(long, default_value = "", conflicts_with = "reg_nickname")] +// reg_address: String, + +// // Delay before synchronising local register with the network +// #[clap(long, default_value_t = 2000)] +// delay_millis: u64, +// } + +// #[tokio::main] +// async fn main() -> Result<()> { +// let opt = Opt::parse(); +// let user = opt.user; +// let mut reg_nickname = opt.reg_nickname; +// let reg_address_string = opt.reg_address; +// let delay = Duration::from_millis(opt.delay_millis); + +// // let's build a random secret key to sign our Register ops +// let signer = SecretKey::random(); + +// println!("Starting SAFE client..."); +// let client = Client::new(signer, None, None, None).await?; +// println!("SAFE client signer public key: {:?}", client.signer_pk()); + +// // We'll retrieve (or create if not found) a Register, and write on it +// // in offline mode, syncing with the network periodically. 
+ +// let mut meta = XorName::from_content(reg_nickname.as_bytes()); +// let reg_address = if !reg_nickname.is_empty() { +// meta = XorName::from_content(reg_nickname.as_bytes()); +// RegisterAddress::new(meta, client.signer_pk()) +// } else { +// reg_nickname = format!("{reg_address_string:<6}..."); +// RegisterAddress::from_hex(&reg_address_string) +// .wrap_err("cannot parse hex register address")? +// }; + +// // Loading a local wallet. It needs to have a non-zero balance for +// // this example to be able to pay for the Register's storage. +// let root_dir = dirs_next::data_dir() +// .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? +// .join("safe") +// .join("client"); + +// let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) +// .wrap_err(format!("Unable to read wallet file in {root_dir:?}")) +// .suggestion( +// "If you have an old wallet file, it may no longer be compatible. Try removing it", +// )?; +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // We'll loop asking for new msg to write onto the Register offline, +// // then we'll be syncing the offline Register with the network, i.e. 
+// // both pushing and pulling all changes made to it by us and other clients/users. +// // If we detect branches when trying to write, after we synced with remote +// // replicas of the Register, we'll merge them all back into a single value. +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// let input_text = prompt_user(); +// if !input_text.is_empty() { +// println!("Writing msg (offline) to Register: '{input_text}'"); +// let msg = format!("[{user}]: {input_text}"); +// match reg_replica.write(msg.as_bytes()) { +// Ok(_) => {} +// Err(Error::ContentBranchDetected(branches)) => { +// println!( +// "Branches ({}) detected in Register, let's merge them all...", +// branches.len() +// ); +// reg_replica.write_merging_branches(msg.as_bytes())?; +// } +// Err(err) => return Err(err.into()), +// } +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE in {delay:?}..."); +// sleep(delay).await; +// reg_replica.sync(&mut wallet_client, true, None).await?; +// println!("synced!"); +// } +// } + +// fn prompt_user() -> String { +// let mut input_text = String::new(); +// println!(); +// println!("Enter a blank line to receive updates, or some text to be written."); +// io::stdin() +// .read_line(&mut input_text) +// .expect("Failed to read text from stdin"); + +// input_text.trim().to_string() +// } diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 0df4c65181..1b18429e89 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -10,16 +10,24 @@ extern crate tracing; mod rpc_service; +mod subcommands; -use clap::Parser; -use eyre::{eyre, Result}; +use 
crate::subcommands::EvmNetworkCommand; +use clap::{command, Parser}; +use color_eyre::{eyre::eyre, Result}; +use const_hex::traits::FromHex; use libp2p::{identity::Keypair, PeerId}; +use sn_evm::{get_evm_network_from_env, EvmNetwork, RewardsAddress}; #[cfg(feature = "metrics")] use sn_logging::metrics::init_metrics; use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; use sn_node::{Marker, NodeBuilder, NodeEvent, NodeEventsReceiver}; use sn_peers_acquisition::PeersArgs; -use sn_protocol::{node::get_safenode_root_dir, node_rpc::NodeCtrl}; +use sn_protocol::{ + node::get_safenode_root_dir, + node_rpc::{NodeCtrl, StopResult}, + version::IDENTIFY_PROTOCOL_STR, +}; use std::{ env, io::Write, @@ -28,6 +36,7 @@ use std::{ process::Command, time::Duration, }; +use sysinfo::{self, System}; use tokio::{ runtime::Runtime, sync::{broadcast::error::RecvError, mpsc}, @@ -65,6 +74,7 @@ pub fn parse_log_output(val: &str) -> Result { // Please do not remove the blank lines in these doc comments. // They are used for inserting line breaks when the help menu is rendered in the UI. #[derive(Parser, Debug)] +#[command(disable_version_flag = true)] #[clap(name = "safenode cli", version = env!("CARGO_PKG_VERSION"))] struct Opt { /// Specify whether the node is operating from a home network and situated behind a NAT without port forwarding @@ -118,6 +128,19 @@ struct Opt { #[clap(long, verbatim_doc_comment)] max_archived_log_files: Option, + /// Specify the rewards address. + /// The rewards address is the address that will receive the rewards for the node. + /// It should be a valid EVM address. + #[clap(long)] + rewards_address: Option, + + /// Specify the EVM network to use. + /// The network can either be a pre-configured one or a custom network. + /// When setting a custom network, you must specify the RPC URL to a fully synced node and + /// the addresses of the network token and chunk payments contracts. 
+ #[command(subcommand)] + evm_network: Option, + /// Specify the node's data directory. /// /// If not provided, the default location is platform specific: @@ -177,12 +200,70 @@ struct Opt { required_if_eq("metrics_server_port", "0") )] enable_metrics_server: bool, + + /// Print the crate version. + #[clap(long)] + crate_version: bool, + + /// Print the network protocol version. + #[clap(long)] + protocol_version: bool, + + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + package_version: bool, + + /// Print version information. + #[clap(long)] + version: bool, } fn main() -> Result<()> { color_eyre::install()?; let opt = Opt::parse(); + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Node", + env!("CARGO_PKG_VERSION"), + Some(&IDENTIFY_PROTOCOL_STR) + ) + ); + return Ok(()); + } + + // evm config + let rewards_address = RewardsAddress::from_hex(opt.rewards_address.as_ref().expect( + "the following required arguments were not provided: --rewards-address ", + ))?; + + if opt.crate_version { + println!("Crate version: {}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + if opt.protocol_version { + println!("Network version: {}", *IDENTIFY_PROTOCOL_STR); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("Package version: {}", sn_build_info::package_version()); + return Ok(()); + } + + let evm_network: EvmNetwork = opt + .evm_network + .as_ref() + .cloned() + .map(|v| Ok(v.into())) + .unwrap_or_else(get_evm_network_from_env)?; + println!("EVM network: {evm_network:?}"); + let node_socket_addr = SocketAddr::new(opt.ip, opt.port); let (root_dir, keypair) = get_root_dir_and_keypair(&opt.root_dir)?; @@ -197,6 +278,8 @@ fn main() -> Result<()> { env!("CARGO_PKG_VERSION") ); info!("\n{}\n{}", msg, "=".repeat(msg.len())); + + sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR); debug!( "safenode built with git version: {}", 
sn_build_info::git_info() @@ -213,11 +296,12 @@ fn main() -> Result<()> { let restart_options = rt.block_on(async move { let mut node_builder = NodeBuilder::new( keypair, + rewards_address, + evm_network, node_socket_addr, bootstrap_peers, opt.local, root_dir, - opt.owner.clone(), #[cfg(feature = "upnp")] opt.upnp, ); @@ -299,13 +383,65 @@ You can check your reward balance by running: if let Err(err) = ctrl_tx_clone .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Ctrl-C received!"), + result: StopResult::Error(eyre!("Ctrl-C received!")), }) .await { error!("Failed to send node control msg to safenode bin main thread: {err}"); } }); + let ctrl_tx_clone_cpu = ctrl_tx.clone(); + // Monitor host CPU usage + tokio::spawn(async move { + use rand::{thread_rng, Rng}; + + const CPU_CHECK_INTERVAL: Duration = Duration::from_secs(60); + const CPU_USAGE_THRESHOLD: f32 = 50.0; + const HIGH_CPU_CONSECUTIVE_LIMIT: u8 = 5; + const NODE_STOP_DELAY: Duration = Duration::from_secs(1); + const INITIAL_DELAY_MIN_S: u64 = 10; + const INITIAL_DELAY_MAX_S: u64 = + HIGH_CPU_CONSECUTIVE_LIMIT as u64 * CPU_CHECK_INTERVAL.as_secs(); + const JITTER_MIN_S: u64 = 1; + const JITTER_MAX_S: u64 = 15; + + let mut sys = System::new_all(); + + let mut high_cpu_count: u8 = 0; + + // Random initial delay between 1 and 5 minutes + let initial_delay = + Duration::from_secs(thread_rng().gen_range(INITIAL_DELAY_MIN_S..=INITIAL_DELAY_MAX_S)); + tokio::time::sleep(initial_delay).await; + + loop { + sys.refresh_cpu(); + let cpu_usage = sys.global_cpu_info().cpu_usage(); + + if cpu_usage > CPU_USAGE_THRESHOLD { + high_cpu_count += 1; + } else { + high_cpu_count = 0; + } + + if high_cpu_count >= HIGH_CPU_CONSECUTIVE_LIMIT { + if let Err(err) = ctrl_tx_clone_cpu + .send(NodeCtrl::Stop { + delay: NODE_STOP_DELAY, + result: StopResult::Success(format!("Excess host CPU %{CPU_USAGE_THRESHOLD} detected for {HIGH_CPU_CONSECUTIVE_LIMIT} consecutive minutes!")), + }) + .await + { + error!("Failed to 
send node control msg to safenode bin main thread: {err}"); + } + break; + } + + // Add jitter to the interval + let jitter = Duration::from_secs(thread_rng().gen_range(JITTER_MIN_S..=JITTER_MAX_S)); + tokio::time::sleep(CPU_CHECK_INTERVAL + jitter).await; + } + }); // Start up gRPC interface if enabled by user if let Some(addr) = rpc { @@ -341,12 +477,21 @@ You can check your reward balance by running: break Ok(res); } - Some(NodeCtrl::Stop { delay, cause }) => { + Some(NodeCtrl::Stop { delay, result }) => { let msg = format!("Node is stopping in {delay:?}..."); info!("{msg}"); println!("{msg} Node log path: {log_output_dest}"); sleep(delay).await; - return Err(cause); + match result { + StopResult::Success(message) => { + info!("Node stopped successfully: {}", message); + return Ok(None); + } + StopResult::Error(cause) => { + error!("Node stopped with error: {}", cause); + return Err(cause); + } + } } Some(NodeCtrl::Update(_delay)) => { // TODO: implement self-update once safenode app releases are published again @@ -369,7 +514,7 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se if let Err(err) = ctrl_tx .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Node events channel closed!"), + result: StopResult::Error(eyre!("Node events channel closed!")), }) .await { @@ -383,7 +528,7 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se if let Err(err) = ctrl_tx .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Node terminated due to: {reason:?}"), + result: StopResult::Error(eyre!("Node terminated due to: {reason:?}")), }) .await { @@ -417,6 +562,7 @@ fn init_logging(opt: &Opt, peer_id: PeerId) -> Result<(String, ReloadHandle, Opt ("sn_protocol".to_string(), Level::DEBUG), ("sn_registers".to_string(), Level::DEBUG), ("sn_transfers".to_string(), Level::DEBUG), + ("sn_evm".to_string(), Level::DEBUG), ]; let output_dest = match &opt.log_output_dest { diff --git 
a/sn_node/src/bin/safenode/rpc_service.rs b/sn_node/src/bin/safenode/rpc_service.rs index 6943221741..8d16ba8f3d 100644 --- a/sn_node/src/bin/safenode/rpc_service.rs +++ b/sn_node/src/bin/safenode/rpc_service.rs @@ -9,7 +9,7 @@ use eyre::{ErrReport, Result}; use sn_logging::ReloadHandle; use sn_node::RunningNode; -use sn_protocol::node_rpc::NodeCtrl; +use sn_protocol::node_rpc::{NodeCtrl, StopResult}; use sn_protocol::safenode_proto::{ k_buckets_response, safe_node_server::{SafeNode, SafeNodeServer}, @@ -66,11 +66,7 @@ impl SafeNode for SafeNodeRpcService { pid: process::id(), bin_version: env!("CARGO_PKG_VERSION").to_string(), uptime_secs: self.started_instant.elapsed().as_secs(), - wallet_balance: self - .running_node - .get_node_wallet_balance() - .expect("Failed to get node wallet balance") - .as_nano(), + wallet_balance: 0, // NB TODO: Implement this using metrics data? }); Ok(resp) @@ -206,7 +202,14 @@ impl SafeNode for SafeNodeRpcService { }; let delay = Duration::from_millis(request.get_ref().delay_millis); - match self.ctrl_tx.send(NodeCtrl::Stop { delay, cause }).await { + match self + .ctrl_tx + .send(NodeCtrl::Stop { + delay, + result: StopResult::Success(cause.to_string()), + }) + .await + { Ok(()) => Ok(Response::new(StopResponse {})), Err(err) => Err(Status::new( Code::Internal, diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs new file mode 100644 index 0000000000..c2b0389465 --- /dev/null +++ b/sn_node/src/bin/safenode/subcommands.rs @@ -0,0 +1,42 @@ +use clap::Subcommand; +use sn_evm::EvmNetwork; + +#[derive(Subcommand, Clone, Debug)] +#[allow(clippy::enum_variant_names)] +pub(crate) enum EvmNetworkCommand { + /// Use the Arbitrum One network + EvmArbitrumOne, + + /// Use the Arbitrum Sepolia network + EvmArbitrumSepolia, + + /// Use a custom network + EvmCustom { + /// The RPC URL for the custom network + #[arg(long)] + rpc_url: String, + + /// The payment token contract address + #[arg(long, short)] + 
payment_token_address: String, + + /// The chunk payments contract address + #[arg(long, short)] + data_payments_address: String, + }, +} + +#[allow(clippy::from_over_into)] +impl Into for EvmNetworkCommand { + fn into(self) -> EvmNetwork { + match self { + Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmArbitrumSepolia => EvmNetwork::ArbitrumSepolia, + Self::EvmCustom { + rpc_url, + payment_token_address, + data_payments_address, + } => EvmNetwork::new_custom(&rpc_url, &payment_token_address, &data_payments_address), + } + } +} diff --git a/sn_node/src/error.rs b/sn_node/src/error.rs index 167db9eb20..a74ed00bc7 100644 --- a/sn_node/src/error.rs +++ b/sn_node/src/error.rs @@ -6,14 +6,16 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use sn_evm::AttoTokens; use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; -use sn_transfers::{NanoTokens, WalletError}; +use sn_transfers::WalletError; use thiserror::Error; pub(super) type Result = std::result::Result; /// Internal error. 
#[derive(Debug, Error)] +#[allow(missing_docs)] pub enum Error { #[error("Network error {0}")] Network(#[from] sn_networking::NetworkError), @@ -28,7 +30,7 @@ pub enum Error { Wallet(#[from] WalletError), #[error("Transfers Error {0}")] - Transfers(#[from] sn_transfers::TransferError), + Transfers(#[from] sn_evm::EvmError), #[error("Failed to parse NodeEvent")] NodeEventParsingFailed, @@ -49,6 +51,13 @@ pub enum Error { #[error("The Record::key does not match with the key derived from Record::value")] RecordKeyMismatch, + // Scratchpad is old version + #[error("A newer version of this Scratchpad already exists")] + IgnoringOutdatedScratchpadPut, + // Scratchpad is invalid + #[error("Scratchpad signature is invalid over the counter + content hash")] + InvalidScratchpadSignature, + // ---------- Payment Errors #[error("The content of the payment quote is invalid")] InvalidQuoteContent, @@ -67,8 +76,8 @@ pub enum Error { /// The amount paid by payment proof is not the required for the received content #[error("The amount paid by payment proof is not the required for the received content, paid {paid}, expected {expected}")] PaymentProofInsufficientAmount { - paid: NanoTokens, - expected: NanoTokens, + paid: AttoTokens, + expected: AttoTokens, }, #[error("A payment we received contains cash notes already confirmed to be spent")] ReusedPayment, @@ -86,4 +95,9 @@ pub enum Error { /// Error occurred in an async thread #[error("Error occured in async thread: {0}")] JoinErrorInAsyncThread(String), + + #[error("EVM Network error: {0}")] + EvmNetwork(String), + #[error("Invalid quote timestamp: {0}")] + InvalidQuoteTimestamp(String), } diff --git a/sn_node/src/event.rs b/sn_node/src/event.rs index c3e9857bad..6237e1d8bf 100644 --- a/sn_node/src/event.rs +++ b/sn_node/src/event.rs @@ -9,8 +9,11 @@ use crate::error::{Error, Result}; use serde::{Deserialize, Serialize}; -use sn_protocol::storage::{ChunkAddress, RegisterAddress}; -use sn_transfers::UniquePubkey; +use 
sn_evm::AttoTokens; +use sn_protocol::{ + storage::{ChunkAddress, RegisterAddress}, + NetworkAddress, +}; use tokio::sync::broadcast; const NODE_EVENT_CHANNEL_SIZE: usize = 500; @@ -62,8 +65,8 @@ pub enum NodeEvent { RegisterCreated(RegisterAddress), /// A Register edit operation has been applied in local storage RegisterEdited(RegisterAddress), - /// A CashNote Spend has been stored in local storage - SpendStored(UniquePubkey), + /// A new reward was received + RewardReceived(AttoTokens, NetworkAddress), /// One of the sub event channel closed and unrecoverable. ChannelClosed, /// Terminates the node diff --git a/sn_node/src/lib.rs b/sn_node/src/lib.rs index 4f097a7724..60f0222abf 100644 --- a/sn_node/src/lib.rs +++ b/sn_node/src/lib.rs @@ -48,7 +48,6 @@ use crate::error::{Error, Result}; use libp2p::PeerId; use sn_networking::{Network, SwarmLocalState}; use sn_protocol::{get_port_from_multiaddr, NetworkAddress}; -use sn_transfers::{HotWallet, NanoTokens}; use std::{ collections::{BTreeMap, HashSet}, path::PathBuf, @@ -60,6 +59,7 @@ use std::{ pub struct RunningNode { network: Network, node_events_channel: NodeEventsChannel, + root_dir_path: PathBuf, } impl RunningNode { @@ -77,13 +77,7 @@ impl RunningNode { /// - Windows: C:\Users\\AppData\Roaming\safe\node\ #[expect(rustdoc::invalid_html_tags)] pub fn root_dir_path(&self) -> PathBuf { - self.network.root_dir_path().clone() - } - - /// Returns the wallet balance of the node - pub fn get_node_wallet_balance(&self) -> Result { - let wallet = HotWallet::load_from(self.network.root_dir_path())?; - Ok(wallet.balance()) + self.root_dir_path.clone() } /// Returns a `SwarmLocalState` with some information obtained from swarm's local state. 
@@ -110,6 +104,7 @@ impl RunningNode { /// Returns the list of all the RecordKeys held by the node pub async fn get_all_record_addresses(&self) -> Result> { + #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress let addresses: HashSet<_> = self .network .get_all_local_record_addresses() diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs index ab1aacf325..0be204d38c 100644 --- a/sn_node/src/log_markers.rs +++ b/sn_node/src/log_markers.rs @@ -43,6 +43,8 @@ pub enum Marker<'a> { ValidRegisterRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>), /// Valid non-existing Spend record PUT from the network received and stored ValidSpendRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>), + /// Valid Scratchpad record PUT from the network received and stored + ValidScratchpadRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>), /// Valid paid to us and royalty paid chunk stored ValidPaidChunkPutFromClient(&'a PrettyPrintRecordKey<'a>), @@ -50,6 +52,8 @@ pub enum Marker<'a> { ValidPaidRegisterPutFromClient(&'a PrettyPrintRecordKey<'a>), /// Valid spend stored ValidSpendPutFromClient(&'a PrettyPrintRecordKey<'a>), + /// Valid scratchpad stored + ValidScratchpadRecordPutFromClient(&'a PrettyPrintRecordKey<'a>), /// Record rejected RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error), diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 4ba458448e..83ae86e4d6 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -14,10 +14,12 @@ use prometheus_client::{ family::Family, gauge::Gauge, histogram::{exponential_buckets, Histogram}, + info::Info, }, - registry::Registry, }; -use sn_networking::Instant; +use sn_networking::target_arch::Instant; +#[cfg(feature = "open-metrics")] +use sn_networking::MetricsRegistries; #[derive(Clone)] /// The shared recorders that are used to record metrics. 
@@ -36,7 +38,7 @@ pub(crate) struct NodeMetricsRecorder { // wallet pub(crate) current_reward_wallet_balance: Gauge, - pub(crate) total_forwarded_rewards: Gauge, + pub(crate) _total_forwarded_rewards: Gauge, // to track the uptime of the node. pub(crate) started_instant: Instant, @@ -56,8 +58,20 @@ enum RecordType { } impl NodeMetricsRecorder { - pub(crate) fn new(registry: &mut Registry) -> Self { - let sub_registry = registry.sub_registry_with_prefix("sn_node"); + pub(crate) fn new(registries: &mut MetricsRegistries) -> Self { + let node_metadata_sub_registry = registries.metadata.sub_registry_with_prefix("sn_node"); + node_metadata_sub_registry.register( + "safenode_version", + "The version of the safe node", + Info::new(vec![( + "safenode_version".to_string(), + env!("CARGO_PKG_VERSION").to_string(), + )]), + ); + + let sub_registry = registries + .standard_metrics + .sub_registry_with_prefix("sn_node"); let put_record_ok = Family::default(); sub_registry.register( @@ -130,7 +144,7 @@ impl NodeMetricsRecorder { peer_added_to_routing_table, peer_removed_from_routing_table, current_reward_wallet_balance, - total_forwarded_rewards, + _total_forwarded_rewards: total_forwarded_rewards, started_instant: Instant::now(), uptime, } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 0caeab2fa7..4bb21c720c 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -7,21 +7,17 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use super::{ - error::{Error, Result}, - event::NodeEventsChannel, - quote::quotes_verification, - Marker, NodeEvent, + error::Result, event::NodeEventsChannel, quote::quotes_verification, Marker, NodeEvent, }; #[cfg(feature = "open-metrics")] use crate::metrics::NodeMetricsRecorder; use crate::RunningNode; use bytes::Bytes; use libp2p::{identity::Keypair, Multiaddr, PeerId}; -#[cfg(feature = "open-metrics")] -use prometheus_client::metrics::{gauge::Gauge, info::Info}; -#[cfg(feature = "open-metrics")] -use prometheus_client::registry::Registry; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; +use sn_evm::{AttoTokens, RewardsAddress}; +#[cfg(feature = "open-metrics")] +use sn_networking::MetricsRegistries; use sn_networking::{ close_group_majority, Instant, Network, NetworkBuilder, NetworkError, NetworkEvent, NodeIssue, SwarmDriver, @@ -31,7 +27,6 @@ use sn_protocol::{ messages::{ChunkProof, CmdResponse, Query, QueryResponse, Request, Response}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_transfers::{HotWallet, MainPubkey, MainSecretKey, NanoTokens, PAYMENT_FORWARD_PK}; use std::{ net::SocketAddr, path::PathBuf, @@ -46,12 +41,7 @@ use tokio::{ task::{spawn, JoinHandle}, }; -#[cfg(feature = "reward-forward")] -use libp2p::kad::{Quorum, Record}; -#[cfg(feature = "reward-forward")] -use sn_networking::PutRecordCfg; -#[cfg(feature = "reward-forward")] -use sn_protocol::storage::{try_serialize_record, RecordKind, SpendAddress}; +use sn_evm::EvmNetwork; /// Interval to trigger replication of all records to all peers. /// This is the max time it should take. Minimum interval at any node will be half this @@ -61,10 +51,6 @@ pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45; /// This is the max time it should take. Minimum interval at any node will be half this const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 600; -/// Interval to trigger reward forwarding. -/// This is the max time it should take. 
Minimum interval at any node will be half this -const PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S: u64 = 450; - /// Max number of attempts that chunk proof verification will be carried out against certain target, /// before classifying peer as a bad peer. const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3; @@ -72,10 +58,6 @@ const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3; /// Interval between chunk proof verification to be retired against the same target. const CHUNK_PROOF_VERIFY_RETRY_INTERVAL: Duration = Duration::from_secs(15); -#[cfg(feature = "reward-forward")] -/// Track the forward balance by storing the balance in a file. This is useful to restore the balance between restarts. -const FORWARDED_BALANCE_FILE_NAME: &str = "forwarded_balance"; - /// Interval to update the nodes uptime metric const UPTIME_METRICS_UPDATE_INTERVAL: Duration = Duration::from_secs(10); @@ -84,7 +66,9 @@ const UNRELEVANT_RECORDS_CLEANUP_INTERVAL: Duration = Duration::from_secs(3600); /// Helper to build and run a Node pub struct NodeBuilder { - keypair: Keypair, + identity_keypair: Keypair, + evm_address: RewardsAddress, + evm_network: EvmNetwork, addr: SocketAddr, initial_peers: Vec, local: bool, @@ -94,24 +78,27 @@ pub struct NodeBuilder { metrics_server_port: Option, /// Enable hole punching for nodes connecting from home networks. 
pub is_behind_home_network: bool, - owner: Option, #[cfg(feature = "upnp")] upnp: bool, } impl NodeBuilder { /// Instantiate the builder + #[expect(clippy::too_many_arguments)] pub fn new( - keypair: Keypair, + identity_keypair: Keypair, + evm_address: RewardsAddress, + evm_network: EvmNetwork, addr: SocketAddr, initial_peers: Vec, local: bool, root_dir: PathBuf, - owner: Option, #[cfg(feature = "upnp")] upnp: bool, ) -> Self { Self { - keypair, + identity_keypair, + evm_address, + evm_network, addr, initial_peers, local, @@ -119,7 +106,6 @@ impl NodeBuilder { #[cfg(feature = "open-metrics")] metrics_server_port: None, is_behind_home_network: false, - owner, #[cfg(feature = "upnp")] upnp, } @@ -144,43 +130,17 @@ impl NodeBuilder { /// /// Returns an error if there is a problem initializing the `SwarmDriver`. pub fn build_and_run(self) -> Result { - // Using the signature as the seed of generating the reward_key - let sig_vec = match self.keypair.sign(b"generate reward seed") { - Ok(sig) => sig, - Err(_err) => return Err(Error::FailedToGenerateRewardKey), - }; - let mut rng = sn_transfers::rng::from_vec(&sig_vec); - - let reward_key = MainSecretKey::random_from_rng(&mut rng); - let reward_address = reward_key.main_pubkey(); - - let mut wallet = HotWallet::load_from_main_key(&self.root_dir, reward_key)?; - // store in case it's a fresh wallet created if none was found - wallet.deposit_and_store_to_disk(&vec![])?; - - let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir); + let mut network_builder = NetworkBuilder::new(self.identity_keypair, self.local); #[cfg(feature = "open-metrics")] - let node_metrics = if self.metrics_server_port.is_some() { + let metrics_recorder = if self.metrics_server_port.is_some() { // metadata registry - let mut metadata_registry = Registry::default(); - let node_metadata_sub_registry = metadata_registry.sub_registry_with_prefix("sn_node"); - node_metadata_sub_registry.register( - "safenode_version", - "The 
version of the safe node", - Info::new(vec![( - "safenode_version".to_string(), - env!("CARGO_PKG_VERSION").to_string(), - )]), - ); - network_builder.metrics_metadata_registry(metadata_registry); + let mut metrics_registries = MetricsRegistries::default(); + let metrics_recorder = NodeMetricsRecorder::new(&mut metrics_registries); - // metrics registry - let mut metrics_registry = Registry::default(); - let node_metrics = NodeMetricsRecorder::new(&mut metrics_registry); - network_builder.metrics_registry(metrics_registry); + network_builder.metrics_registries(metrics_registries); - Some(node_metrics) + Some(metrics_recorder) } else { None }; @@ -194,17 +154,18 @@ impl NodeBuilder { #[cfg(feature = "upnp")] network_builder.upnp(self.upnp); - let (network, network_event_receiver, swarm_driver) = network_builder.build_node()?; + let (network, network_event_receiver, swarm_driver) = + network_builder.build_node(self.root_dir.clone())?; let node_events_channel = NodeEventsChannel::default(); let node = NodeInner { network: network.clone(), events_channel: node_events_channel.clone(), initial_peers: self.initial_peers, - reward_address, + reward_address: self.evm_address, #[cfg(feature = "open-metrics")] - node_metrics, - owner: self.owner, + metrics_recorder, + evm_network: self.evm_network, }; let node = Node { inner: Arc::new(node), @@ -212,6 +173,7 @@ impl NodeBuilder { let running_node = RunningNode { network, node_events_channel, + root_dir_path: self.root_dir, }; // Run the node @@ -237,11 +199,9 @@ struct NodeInner { initial_peers: Vec, network: Network, #[cfg(feature = "open-metrics")] - node_metrics: Option, - /// Node owner's discord username, in readable format - /// If not set, there will be no payment forward to be undertaken - owner: Option, - reward_address: MainPubkey, + metrics_recorder: Option, + reward_address: RewardsAddress, + evm_network: EvmNetwork, } impl Node { @@ -261,42 +221,27 @@ impl Node { } #[cfg(feature = "open-metrics")] - /// Returns a 
reference to the NodeMetrics if the `open-metrics` feature flag is enabled - pub(crate) fn node_metrics(&self) -> Option<&NodeMetricsRecorder> { - self.inner.node_metrics.as_ref() - } - - /// Returns the owner of the node - pub(crate) fn owner(&self) -> Option<&String> { - self.inner.owner.as_ref() + /// Returns a reference to the NodeMetricsRecorder if the `open-metrics` feature flag is enabled + /// This is used to record various metrics for the node. + pub(crate) fn metrics_recorder(&self) -> Option<&NodeMetricsRecorder> { + self.inner.metrics_recorder.as_ref() } /// Returns the reward address of the node - pub(crate) fn reward_address(&self) -> &MainPubkey { + pub(crate) fn reward_address(&self) -> &RewardsAddress { &self.inner.reward_address } + pub(crate) fn evm_network(&self) -> &EvmNetwork { + &self.inner.evm_network + } + /// Runs the provided `SwarmDriver` and spawns a task to process for `NetworkEvents` fn run(self, swarm_driver: SwarmDriver, mut network_event_receiver: Receiver) { let mut rng = StdRng::from_entropy(); let peers_connected = Arc::new(AtomicUsize::new(0)); - // read the forwarded balance from the file and set the metric. 
- // This is done initially because reward forwarding takes a while to kick in - #[cfg(all(feature = "reward-forward", feature = "open-metrics"))] - let node_copy = self.clone(); - #[cfg(all(feature = "reward-forward", feature = "open-metrics"))] - let _handle = spawn(async move { - let root_dir = node_copy.network().root_dir_path().clone(); - let balance_file_path = root_dir.join(FORWARDED_BALANCE_FILE_NAME); - let balance = read_forwarded_balance_value(&balance_file_path); - - if let Some(node_metrics) = node_copy.node_metrics() { - let _ = node_metrics.total_forwarded_rewards.set(balance as i64); - } - }); - let _handle = spawn(swarm_driver.run()); let _handle = spawn(async move { // use a random inactivity timeout to ensure that the nodes do not sync when messages @@ -323,19 +268,6 @@ impl Node { let mut rolling_index = 0; - // use a random timeout to ensure not sync when transmit messages. - let balance_forward_interval: u64 = rng.gen_range( - PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S / 2..PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S, - ); - let balance_forward_time = Duration::from_secs(balance_forward_interval); - debug!( - "BalanceForward interval set to {balance_forward_time:?} to: {:?}", - PAYMENT_FORWARD_PK.to_hex(), - ); - - let mut balance_forward_interval = tokio::time::interval(balance_forward_time); - let _ = balance_forward_interval.tick().await; // first tick completes immediately - let mut uptime_metrics_update_interval = tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL); let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately @@ -395,40 +327,10 @@ impl Node { rolling_index += 1; } } - // runs every balance_forward_interval time - _ = balance_forward_interval.tick() => { - if cfg!(feature = "reward-forward") { - if let Some(owner) = self.owner() { - let start = Instant::now(); - debug!("Periodic balance forward triggered"); - let network = self.network().clone(); - let forwarding_reason = owner.clone(); - - 
#[cfg(feature = "open-metrics")] - let total_forwarded_rewards = self.node_metrics().map(|metrics|metrics.total_forwarded_rewards.clone()); - #[cfg(feature = "open-metrics")] - let current_reward_wallet_balance = self.node_metrics().map(|metrics|metrics.current_reward_wallet_balance.clone()); - - let _handle = spawn(async move { - - #[cfg(feature = "open-metrics")] - if let Err(err) = Self::try_forward_balance(network, forwarding_reason, total_forwarded_rewards,current_reward_wallet_balance) { - error!("Error while trying to forward balance: {err:?}"); - } - #[cfg(not(feature = "open-metrics"))] - if let Err(err) = Self::try_forward_balance(network, forwarding_reason) { - error!("Error while trying to forward balance: {err:?}"); - } - info!("Periodic balance forward took {:?}", start.elapsed()); - }); - } - - } - } _ = uptime_metrics_update_interval.tick() => { #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics.uptime.set(node_metrics.started_instant.elapsed().as_secs() as i64); + if let Some(metrics_recorder) = self.metrics_recorder() { + let _ = metrics_recorder.uptime.set(metrics_recorder.started_instant.elapsed().as_secs() as i64); } } _ = unrelevant_records_cleanup_interval.tick() => { @@ -448,8 +350,8 @@ impl Node { pub(crate) fn record_metrics(&self, marker: Marker) { marker.log(); #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - node_metrics.record(marker) + if let Some(metrics_recorder) = self.metrics_recorder() { + metrics_recorder.record(marker) } } @@ -499,7 +401,7 @@ impl Node { } NetworkEvent::NewListenAddr(_) => { event_header = "NewListenAddr"; - if !cfg!(feature = "local-discovery") { + if !cfg!(feature = "local") { let network = self.network().clone(); let peers = self.initial_peers().clone(); let _handle = spawn(async move { @@ -694,7 +596,7 @@ impl Node { async fn handle_query( network: &Network, query: Query, - payment_address: MainPubkey, + 
payment_address: RewardsAddress, ) -> Response { let resp: QueryResponse = match query { Query::GetStoreCost(address) => { @@ -705,8 +607,8 @@ impl Node { let store_cost = network.get_local_storecost(record_key.clone()).await; match store_cost { - Ok((cost, quoting_metrics)) => { - if cost == NanoTokens::zero() { + Ok((cost, quoting_metrics, bad_nodes)) => { + if cost == AttoTokens::zero() { QueryResponse::GetStoreCost { quote: Err(ProtocolError::RecordExists( PrettyPrintRecordKey::from(&record_key).into_owned(), @@ -721,6 +623,8 @@ impl Node { cost, &address, "ing_metrics, + bad_nodes, + &payment_address, ), payment_address, peer_address: NetworkAddress::from_peer(self_id), @@ -862,130 +766,6 @@ impl Node { } } } - - /// Forward received rewards to another address - fn try_forward_balance( - network: Network, - forward_reason: String, - #[cfg(feature = "open-metrics")] forwarded_balance_metric: Option, - #[cfg(feature = "open-metrics")] current_reward_wallet_balance: Option, - ) -> Result<()> { - let mut spend_requests = vec![]; - { - // load wallet - let mut wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = wallet.balance(); - - if !balance.is_zero() { - let payee = vec![(balance, *PAYMENT_FORWARD_PK)]; - spend_requests.extend(wallet.prepare_forward_signed_spend(payee, forward_reason)?); - } - } - let total_forwarded_amount = spend_requests - .iter() - .map(|s| s.amount().as_nano()) - .sum::(); - - let record_kind = RecordKind::Spend; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: None, - use_put_record_to: None, - verification: None, - }; - - info!( - "Reward forwarding sending {} spends in this iteration. 
Total forwarded amount: {total_forwarded_amount}", - spend_requests.len() - ); - - for spend_request in spend_requests { - let network_clone = network.clone(); - let put_cfg_clone = put_cfg.clone(); - - // Sent out spend in separate thread to avoid blocking the main one - let _handle = spawn(async move { - let unique_pubkey = *spend_request.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let record_key = network_address.to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&record_key); - - debug!("Reward forwarding in spend {pretty_key:?}: {spend_request:#?}"); - - let value = if let Ok(value) = try_serialize_record(&[spend_request], record_kind) { - value - } else { - error!("Reward forwarding: Failed to serialise spend {pretty_key:?}"); - return; - }; - - let record = Record { - key: record_key.clone(), - value: value.to_vec(), - publisher: None, - expires: None, - }; - - let result = network_clone.put_record(record, &put_cfg_clone).await; - - match result { - Ok(_) => info!("Reward forwarding completed sending spend {pretty_key:?}"), - Err(err) => { - info!("Reward forwarding: sending spend {pretty_key:?} failed with {err:?}") - } - } - }); - - std::thread::sleep(Duration::from_millis(500)); - } - - // write the balance to a file - let balance_file_path = network.root_dir_path().join(FORWARDED_BALANCE_FILE_NAME); - let old_balance = read_forwarded_balance_value(&balance_file_path); - let updated_balance = old_balance + total_forwarded_amount; - debug!("Updating forwarded balance to {updated_balance}"); - write_forwarded_balance_value(&balance_file_path, updated_balance)?; - - #[cfg(feature = "open-metrics")] - { - if let Some(forwarded_balance_metric) = forwarded_balance_metric { - let _ = forwarded_balance_metric.set(updated_balance as i64); - } - - let wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = 
wallet.balance(); - if let Some(current_reward_wallet_balance) = current_reward_wallet_balance { - let _ = current_reward_wallet_balance.set(balance.as_nano() as i64); - } - } - - Ok(()) - } -} - -fn read_forwarded_balance_value(balance_file_path: &PathBuf) -> u64 { - debug!("Reading forwarded balance from file {balance_file_path:?}"); - match std::fs::read_to_string(balance_file_path) { - Ok(balance) => balance.parse::().unwrap_or_else(|_| { - debug!("The balance from file is not a valid number"); - 0 - }), - Err(_) => { - debug!("Error while reading to string, setting the balance to 0. This can happen at node init."); - 0 - } - } -} - -fn write_forwarded_balance_value(balance_file_path: &PathBuf, balance: u64) -> Result<()> { - if let Err(err) = std::fs::write(balance_file_path, balance.to_string()) { - error!( - "Failed to write the updated balance to the file {balance_file_path:?} with {err:?}" - ); - } - Ok(()) } async fn chunk_proof_verify_peer( @@ -1052,29 +832,3 @@ fn received_valid_chunk_proof( None } } - -#[cfg(test)] -mod tests { - - use crate::node::{read_forwarded_balance_value, write_forwarded_balance_value}; - use color_eyre::Result; - use tempfile::tempdir; - #[test] - fn read_and_write_reward_to_file() -> Result<()> { - let dir = tempdir()?; - let balance_file_path = dir.path().join("forwarded_balance"); - - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 0); - - write_forwarded_balance_value(&balance_file_path, balance + 10)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 10); - - write_forwarded_balance_value(&balance_file_path, balance + 100)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 110); - - Ok(()) - } -} diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 8879b55cb4..3f3343f403 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -6,34 +6,33 @@ // KIND, either 
express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result}; +use crate::{node::Node, Error, Marker, Result}; use libp2p::kad::{Record, RecordKey}; +use sn_evm::ProofOfPayment; use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ storage::{ try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, - SpendAddress, + Scratchpad, SpendAddress, }, NetworkAddress, PrettyPrintRecordKey, }; use sn_registers::SignedRegister; -use sn_transfers::{ - calculate_royalties_fee, CashNote, CashNoteRedemption, HotWallet, NanoTokens, Payment, - SignedSpend, Transfer, TransferError, UniquePubkey, WalletError, NETWORK_ROYALTIES_PK, -}; +use sn_transfers::{SignedSpend, TransferError, UniquePubkey, QUOTE_EXPIRATION_SECS}; use std::collections::BTreeSet; +use std::time::{Duration, UNIX_EPOCH}; use tokio::task::JoinSet; use xor_name::XorName; impl Node { - /// Validate a record and it's payment, and store the record to the RecordStore + /// Validate a record and its payment, and store the record to the RecordStore pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> { let record_header = RecordHeader::from_record(&record)?; match record_header.kind { RecordKind::ChunkWithPayment => { let record_key = record.key.clone(); - let (payment, chunk) = try_deserialize_record::<(Payment, Chunk)>(&record)?; + let (payment, chunk) = try_deserialize_record::<(ProofOfPayment, Chunk)>(&record)?; let already_exists = self .validate_key_and_existence(&chunk.network_address(), &record_key) .await?; @@ -87,12 +86,62 @@ impl Node { store_chunk_result } + RecordKind::Chunk => { error!("Chunk should not be validated at this point"); Err(Error::InvalidPutWithoutPayment( 
PrettyPrintRecordKey::from(&record.key).into_owned(), )) } + RecordKind::ScratchpadWithPayment => { + let record_key = record.key.clone(); + let (payment, scratchpad) = + try_deserialize_record::<(ProofOfPayment, Scratchpad)>(&record)?; + let _already_exists = self + .validate_key_and_existence(&scratchpad.network_address(), &record_key) + .await?; + + // Validate the payment and that we received what we asked. + // This stores any payments to disk + let payment_res = self + .payment_for_us_exists_and_is_still_valid( + &scratchpad.network_address(), + payment, + ) + .await; + + // Finally before we store, lets bail for any payment issues + payment_res?; + + // Writing chunk to disk takes time, hence try to execute it first. + // So that when the replicate target asking for the copy, + // the node can have a higher chance to respond. + let store_scratchpad_result = self + .validate_and_store_scratchpad_record(scratchpad, record_key.clone(), true) + .await; + + if store_scratchpad_result.is_ok() { + Marker::ValidScratchpadRecordPutFromClient(&PrettyPrintRecordKey::from( + &record_key, + )) + .log(); + self.replicate_valid_fresh_record(record_key.clone(), RecordType::Scratchpad); + + // Notify replication_fetcher to mark the attempt as completed. + // Send the notification earlier to avoid it got skipped due to: + // the record becomes stored during the fetch because of other interleaved process. 
+ self.network() + .notify_fetch_completed(record_key, RecordType::Scratchpad); + } + + store_scratchpad_result + } + RecordKind::Scratchpad => { + error!("Scratchpad should not be validated at this point"); + Err(Error::InvalidPutWithoutPayment( + PrettyPrintRecordKey::from(&record.key).into_owned(), + )) + } RecordKind::Spend => { let record_key = record.key.clone(); let value_to_hash = record.value.clone(); @@ -159,7 +208,7 @@ impl Node { } RecordKind::RegisterWithPayment => { let (payment, register) = - try_deserialize_record::<(Payment, SignedRegister)>(&record)?; + try_deserialize_record::<(ProofOfPayment, SignedRegister)>(&record)?; // check if the deserialized value's RegisterAddress matches the record's key let net_addr = NetworkAddress::from_register_address(*register.address()); @@ -213,7 +262,9 @@ impl Node { let record_header = RecordHeader::from_record(&record)?; match record_header.kind { // A separate flow handles payment for chunks and registers - RecordKind::ChunkWithPayment | RecordKind::RegisterWithPayment => { + RecordKind::ChunkWithPayment + | RecordKind::RegisterWithPayment + | RecordKind::ScratchpadWithPayment => { warn!("Prepaid record came with Payment, which should be handled in another flow"); Err(Error::UnexpectedRecordWithPayment( PrettyPrintRecordKey::from(&record.key).into_owned(), @@ -236,6 +287,12 @@ impl Node { self.store_chunk(&chunk) } + RecordKind::Scratchpad => { + let key = record.key.clone(); + let scratchpad = try_deserialize_record::(&record)?; + self.validate_and_store_scratchpad_record(scratchpad, key, false) + .await + } RecordKind::Spend => { let record_key = record.key.clone(); let spends = try_deserialize_record::>(&record)?; @@ -323,6 +380,69 @@ impl Node { Ok(()) } + /// Validate and store a `Scratchpad` to the RecordStore + /// + /// When a node receives an update packet: + /// Verify Name: It MUST hash the provided public key and confirm it matches the name in the packet. 
+ /// Check Counter: It MUST ensure that the new counter value is strictly greater than the currently stored value to prevent replay attacks. + /// Verify Signature: It MUST use the public key to verify the BLS12-381 signature against the content hash and the counter. + /// Accept or Reject: If all verifications succeed, the node MUST accept the packet and replace any previous version. Otherwise, it MUST reject the update. + + pub(crate) async fn validate_and_store_scratchpad_record( + &self, + scratchpad: Scratchpad, + record_key: RecordKey, + is_client_put: bool, + ) -> Result<()> { + // owner PK is defined herein, so as long as record key and this match, we're good + let addr = scratchpad.address(); + debug!("Validating and storing scratchpad {addr:?}"); + + // check if the deserialized value's RegisterAddress matches the record's key + let scratchpad_key = NetworkAddress::ScratchpadAddress(*addr).to_record_key(); + if scratchpad_key != record_key { + warn!("Record's key does not match with the value's ScratchpadAddress, ignoring PUT."); + return Err(Error::RecordKeyMismatch); + } + + // check if the Scratchpad is present locally that we don't have a newer version + if let Some(local_pad) = self.network().get_local_record(&scratchpad_key).await? 
{ + let local_pad = try_deserialize_record::(&local_pad)?; + if local_pad.count() >= scratchpad.count() { + warn!("Rejecting Scratchpad PUT with counter less than or equal to the current counter"); + return Err(Error::IgnoringOutdatedScratchpadPut); + } + } + + // ensure data integrity + if !scratchpad.is_valid() { + warn!("Rejecting Scratchpad PUT with invalid signature"); + return Err(Error::InvalidScratchpadSignature); + } + + info!( + "Storing sratchpad {addr:?} with content of {:?} as Record locally", + scratchpad.encrypted_data_hash() + ); + + let record = Record { + key: scratchpad_key.clone(), + value: try_serialize_record(&scratchpad, RecordKind::Scratchpad)?.to_vec(), + publisher: None, + expires: None, + }; + self.network().put_local_record(record); + + let pretty_key = PrettyPrintRecordKey::from(&scratchpad_key); + + self.record_metrics(Marker::ValidScratchpadRecordPutFromNetwork(&pretty_key)); + + if is_client_put { + self.replicate_valid_fresh_record(scratchpad_key, RecordType::Scratchpad); + } + + Ok(()) + } /// Validate and store a `Register` to the RecordStore pub(crate) async fn validate_and_store_register( &self, @@ -458,160 +578,75 @@ impl Node { Ok(()) } - /// Gets CashNotes out of Transfers, this includes network verifications of the Transfers - /// Rewraps the royalties transfers into encrypted Transfers ready to be sent directly to the beneficiary - async fn cash_notes_from_transfers( - &self, - transfers: Vec, - wallet: &HotWallet, - pretty_key: PrettyPrintRecordKey<'static>, - ) -> Result<(NanoTokens, Vec, Vec)> { - let royalties_pk = *NETWORK_ROYALTIES_PK; - let mut cash_notes = vec![]; - let mut royalties_cash_notes_r = vec![]; - let mut received_fee = NanoTokens::zero(); - - for transfer in transfers { - match transfer { - Transfer::Encrypted(_) => match self - .network() - .verify_and_unpack_transfer(&transfer, wallet) - .await - { - // transfer not for us - Err(NetworkError::Wallet(WalletError::FailedToDecypherTransfer)) => continue, 
- // transfer invalid - Err(e) => return Err(e.into()), - // transfer ok, add to cash_notes and continue as more transfers might be ours - Ok(cns) => cash_notes.extend(cns), - }, - Transfer::NetworkRoyalties(cashnote_redemptions) => { - match self - .network() - .verify_cash_notes_redemptions(royalties_pk, &cashnote_redemptions) - .await - { - Ok(cash_notes) => { - let received_royalties = total_cash_notes_amount(&cash_notes)?; - debug!( - "{} network royalties payment cash notes found for record {pretty_key} for a total value of {received_royalties:?}", - cash_notes.len() - ); - royalties_cash_notes_r.extend(cashnote_redemptions); - received_fee = received_fee - .checked_add(received_royalties) - .ok_or_else(|| Error::NumericOverflow)?; - } - Err(e) => { - warn!( - "Invalid network royalties payment for record {pretty_key}: {e:?}" - ); - } - } - } - } - } - - if cash_notes.is_empty() { - Err(Error::NoPaymentToOurNode(pretty_key)) - } else { - let received_fee_to_our_node = total_cash_notes_amount(&cash_notes)?; - info!( - "{} cash note/s (for a total of {received_fee_to_our_node:?}) are for us for {pretty_key}", - cash_notes.len() - ); - received_fee = received_fee - .checked_add(received_fee_to_our_node) - .ok_or_else(|| Error::NumericOverflow)?; - - Ok((received_fee, cash_notes, royalties_cash_notes_r)) - } - } - /// Perform validations on the provided `Record`. 
async fn payment_for_us_exists_and_is_still_valid( &self, address: &NetworkAddress, - payment: Payment, + payment: ProofOfPayment, ) -> Result<()> { let key = address.to_record_key(); let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); debug!("Validating record payment for {pretty_key}"); - // load wallet - let mut wallet = HotWallet::load_from(self.network().root_dir_path())?; - let old_balance = wallet.balance().as_nano(); - - // unpack transfer - debug!("Unpacking incoming Transfers for record {pretty_key}"); - let (received_fee, mut cash_notes, royalties_cash_notes_r) = self - .cash_notes_from_transfers(payment.transfers, &wallet, pretty_key.clone()) - .await?; - - // check for cash notes that we have already spent - // this can happen in cases where the client retries a failed PUT after we have already used the cash note - cash_notes.retain(|cash_note| { - let already_present = wallet.cash_note_presents(&cash_note.unique_pubkey()); - if already_present { - return !already_present; - } - - let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - !wallet.has_confirmed_spend(spend_addr) - }); - if cash_notes.is_empty() { - info!("All incoming cash notes were already received, no need to further process"); - return Err(Error::ReusedPayment); + // check if the quote is valid + let storecost = payment.quote.cost; + let self_peer_id = self.network().peer_id(); + if !payment.quote.check_is_signed_by_claimed_peer(self_peer_id) { + warn!("Payment quote signature is not valid for record {pretty_key}"); + return Err(Error::InvalidRequest(format!( + "Payment quote signature is not valid for record {pretty_key}" + ))); } - - debug!("Received payment of {received_fee:?} for {pretty_key}"); + debug!("Payment quote signature is valid for record {pretty_key}"); + + // verify quote timestamp + let quote_timestamp = payment.quote.timestamp; + let quote_expiration_time = quote_timestamp + Duration::from_secs(QUOTE_EXPIRATION_SECS); + let 
quote_expiration_time_in_secs = quote_expiration_time + .duration_since(UNIX_EPOCH) + .map_err(|e| { + Error::InvalidRequest(format!( + "Payment quote timestamp is invalid for record {pretty_key}: {e}" + )) + })? + .as_secs(); + + // check if payment is valid on chain + debug!("Verifying payment for record {pretty_key}"); + self.evm_network() + .verify_data_payment( + payment.tx_hash, + payment.quote.hash(), + *self.reward_address(), + storecost.as_atto(), + quote_expiration_time_in_secs, + ) + .await + .map_err(|e| Error::EvmNetwork(format!("Failed to verify chunk payment: {e}")))?; + debug!("Payment is valid for record {pretty_key}"); // Notify `record_store` that the node received a payment. self.network().notify_payment_received(); - // deposit the CashNotes in our wallet - wallet.deposit_and_store_to_disk(&cash_notes)?; - let new_balance = wallet.balance().as_nano(); - info!( - "The new wallet balance is {new_balance}, after earning {}", - new_balance - old_balance - ); - #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics + if let Some(metrics_recorder) = self.metrics_recorder() { + let _ = metrics_recorder .current_reward_wallet_balance - .set(new_balance as i64); + .inc_by(storecost.as_atto().try_into().unwrap_or(i64::MAX)); // TODO maybe metrics should be in u256 too? 
} + self.events_channel() + .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone())); - if royalties_cash_notes_r.is_empty() { - warn!("No network royalties payment found for record {pretty_key}"); - return Err(Error::NoNetworkRoyaltiesPayment(pretty_key.into_owned())); - } + // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): + info!("Total payment of {storecost:?} atto tokens accepted for record {pretty_key}"); - // check if the quote is valid - let storecost = payment.quote.cost; - verify_quote_for_storecost(self.network(), payment.quote, address)?; - debug!("Payment quote valid for record {pretty_key}"); - - // Let's check payment is sufficient both for our store cost and for network royalties - // Since the storage payment is made to a single node, we can calculate the royalties fee based on that single payment. - let expected_royalties_fee = calculate_royalties_fee(storecost); - let expected_fee = storecost - .checked_add(expected_royalties_fee) - .ok_or(Error::NumericOverflow)?; - - // finally, (after we accept any payments to us as they are ours now anyway) - // lets check they actually paid enough - if received_fee < expected_fee { - debug!("Payment insufficient for record {pretty_key}. 
{received_fee:?} is less than {expected_fee:?}"); - return Err(Error::PaymentProofInsufficientAmount { - paid: received_fee, - expected: expected_fee, - }); + // loud mode: print a celebratory message to console + #[cfg(feature = "loud")] + { + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟 RECEIVED REWARD 🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); + println!("Total payment of {storecost:?} atto tokens accepted for record {pretty_key}"); + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); } - // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): - info!("Total payment of {received_fee:?} nanos accepted for record {pretty_key}"); Ok(()) } @@ -889,19 +924,3 @@ impl Node { } } } - -// Helper to calculate total amout of tokens received in a given set of CashNotes -fn total_cash_notes_amount<'a, I>(cash_notes: I) -> Result -where - I: IntoIterator, -{ - let mut received_fee = NanoTokens::zero(); - for cash_note in cash_notes { - let amount = cash_note.value(); - received_fee = received_fee - .checked_add(amount) - .ok_or(Error::NumericOverflow)?; - } - - Ok(received_fee) -} diff --git a/sn_node/src/quote.rs b/sn_node/src/quote.rs index 2020a2995d..969d326ce0 100644 --- a/sn_node/src/quote.rs +++ b/sn_node/src/quote.rs @@ -8,21 +8,31 @@ use crate::{node::Node, Error, Result}; use libp2p::PeerId; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress}; use sn_networking::{calculate_cost_for_records, Network, NodeIssue}; use sn_protocol::{error::Error as ProtocolError, storage::ChunkAddress, NetworkAddress}; -use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; use std::time::Duration; impl Node { pub(crate) fn create_quote_for_storecost( network: &Network, - cost: NanoTokens, + cost: AttoTokens, address: &NetworkAddress, quoting_metrics: &QuotingMetrics, + bad_nodes: Vec, + payment_address: &RewardsAddress, ) -> Result { let content = address.as_xorname().unwrap_or_default(); let timestamp = std::time::SystemTime::now(); - let bytes = 
PaymentQuote::bytes_for_signing(content, cost, timestamp, quoting_metrics); + let serialised_bad_nodes = rmp_serde::to_vec(&bad_nodes).unwrap_or_default(); + let bytes = PaymentQuote::bytes_for_signing( + content, + cost, + timestamp, + quoting_metrics, + &serialised_bad_nodes, + payment_address, + ); let Ok(signature) = network.sign(&bytes) else { return Err(ProtocolError::QuoteGenerationFailed); @@ -33,7 +43,9 @@ impl Node { cost, timestamp, quoting_metrics: quoting_metrics.clone(), + bad_nodes: serialised_bad_nodes, pub_key: network.get_pub_key(), + rewards_address: *payment_address, signature, }; @@ -60,12 +72,7 @@ pub(crate) fn verify_quote_for_storecost( } // check sig - let bytes = PaymentQuote::bytes_for_signing( - quote.content, - quote.cost, - quote.timestamp, - "e.quoting_metrics, - ); + let bytes = quote.bytes_for_sig(); let signature = quote.signature; if !network.verify(&bytes, &signature) { return Err(Error::InvalidQuoteSignature); @@ -96,7 +103,7 @@ pub(crate) async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, .filter(|(peer_id, quote)| { let is_same_target = quote.content == self_quote.content; let is_not_self = *peer_id != network.peer_id(); - let is_not_zero_quote = quote.cost != NanoTokens::zero(); + let is_not_zero_quote = quote.cost != AttoTokens::zero(); let time_gap = Duration::from_secs(10); let is_around_same_time = if quote.timestamp > self_quote.timestamp { @@ -119,7 +126,7 @@ pub(crate) async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, quotes_for_nodes_duty.retain(|(peer_id, quote)| { let cost = calculate_cost_for_records(quote.quoting_metrics.close_records_stored); - let is_same_as_expected = quote.cost == NanoTokens::from(cost); + let is_same_as_expected = quote.cost == AttoTokens::from_u64(cost); if !is_same_as_expected { info!("Quote from {peer_id:?} using a different quoting_metrics to achieve the claimed cost. 
Quote {quote:?} can only result in cost {cost:?}"); diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index ca631e32f4..59e0cff078 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -75,6 +75,9 @@ impl Node { retry_strategy: None, target_record: None, expected_holders: Default::default(), + // This is for replication, which doesn't have target_recrod to verify with. + // Hence value of the flag actually doesn't matter. + is_register: false, }; match node.network().get_record_from_network(key, &get_cfg).await { Ok(record) => record, diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index bff2c8d333..513fc46a95 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -6,31 +6,23 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use eyre::{bail, OptionExt, Result}; -use libp2p::PeerId; -use sn_client::{ - acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, - send, Client, -}; -use sn_peers_acquisition::parse_peer_addr; +use autonomi::Client; +use evmlib::wallet::Wallet; +use eyre::Result; +use sn_evm::Amount; use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest}; -use sn_service_management::{ - get_local_node_registry_path, safenode_manager_proto::NodeServiceRestartRequest, NodeRegistry, -}; -use sn_transfers::{HotWallet, NanoTokens, Transfer}; +use sn_service_management::{get_local_node_registry_path, NodeRegistry}; +use std::str::FromStr; use std::{net::SocketAddr, path::Path}; +use test_utils::evm::get_new_wallet; use test_utils::testnet::DeploymentInventory; -use tokio::{ - sync::Mutex, - time::{Duration, Instant}, -}; +use test_utils::{evm::get_funded_wallet, peers_from_env}; +use tokio::sync::Mutex; use tonic::Request; -use tracing::{debug, error, info, warn}; +use tracing::{debug, 
info}; use crate::common::get_safenode_rpc_client; -use super::get_safenode_manager_rpc_client; - /// This is a limited hard coded value as Droplet version has to contact the faucet to get the funds. /// This is limited to 10 requests to the faucet, where each request yields 100 SNT pub const INITIAL_WALLET_BALANCE: u64 = 3 * 100 * 1_000_000_000; @@ -47,10 +39,16 @@ const LOAD_FAUCET_WALLET_RETRIES: usize = 6; // mutex to restrict access to faucet wallet from concurrent tests static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); -/// Load HotWallet from dir -pub fn get_wallet(root_dir: &Path) -> HotWallet { - load_account_wallet_or_create_with_mnemonic(root_dir, None) - .expect("Wallet shall be successfully created.") +pub async fn get_client_and_funded_wallet() -> (Client, Wallet) { + match DeploymentInventory::load() { + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + } + Err(_) => ( + LocalNetwork::get_client().await, + LocalNetwork::get_funded_wallet(), + ), + } } /// Get the node count @@ -58,7 +56,10 @@ pub fn get_wallet(root_dir: &Path) -> HotWallet { /// else return the local node count pub fn get_node_count() -> usize { match DeploymentInventory::load() { - Ok(inventory) => inventory.rpc_endpoints.len(), + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + // inventory.rpc_endpoints.len() + } Err(_) => LOCAL_NODE_COUNT, } } @@ -69,32 +70,33 @@ pub fn get_node_count() -> usize { /// /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. /// The restarted node relies on the genesis multiaddr to bootstrap after restart. 
-pub fn get_all_rpc_addresses(skip_genesis_for_droplet: bool) -> Result> { +pub fn get_all_rpc_addresses(_skip_genesis_for_droplet: bool) -> Result> { match DeploymentInventory::load() { - Ok(inventory) => { - if !skip_genesis_for_droplet { - return Ok(inventory.rpc_endpoints.values().cloned().collect()); - } - // else filter out genesis - let genesis_ip = inventory - .vm_list - .iter() - .find_map(|(name, addr)| { - if name.contains("genesis") { - Some(*addr) - } else { - None - } - }) - .ok_or_eyre("Could not get the genesis VM's addr")?; - - let rpc_endpoints = inventory - .rpc_endpoints - .into_iter() - .filter(|(_, addr)| addr.ip() != genesis_ip) - .map(|(_, addr)| addr) - .collect(); - Ok(rpc_endpoints) + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + // if !skip_genesis_for_droplet { + // return Ok(inventory.rpc_endpoints.values().cloned().collect()); + // } + // // else filter out genesis + // let genesis_ip = inventory + // .vm_list + // .iter() + // .find_map(|(name, addr)| { + // if name.contains("genesis") { + // Some(*addr) + // } else { + // None + // } + // }) + // .ok_or_eyre("Could not get the genesis VM's addr")?; + + // let rpc_endpoints = inventory + // .rpc_endpoints + // .into_iter() + // .filter(|(_, addr)| addr.ip() != genesis_ip) + // .map(|(_, addr)| addr) + // .collect(); + // Ok(rpc_endpoints) } Err(_) => { let local_node_reg_path = &get_local_node_registry_path()?; @@ -109,121 +111,55 @@ pub fn get_all_rpc_addresses(skip_genesis_for_droplet: bool) -> Result Result { +/// Transfer tokens from the provided wallet to a newly created wallet +/// Returns the newly created wallet +pub async fn transfer_to_new_wallet(from: &Wallet, amount: usize) -> Result { match DeploymentInventory::load() { - Ok(inventory) => { - Droplet::get_funded_wallet(client, to_wallet_dir, inventory.faucet_address, false).await + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + // Droplet::get_funded_wallet(client, 
to_wallet_dir, inventory.faucet_address, false).await } - Err(_) => NonDroplet::get_funded_wallet(client, to_wallet_dir, false).await, + Err(_) => LocalNetwork::transfer_to_new_wallet(from, amount).await, } } -/// Create a client and fund the wallet. -/// If SN_INVENTORY flag is passed, the wallet is funded by fetching it from the faucet -/// Else create a genesis wallet and transfer funds from there. -/// -/// We get a maximum of 10*100 SNT from the network. This is hardcoded as the Droplet tests have the fetch the -/// coins from the faucet and each request is limited to 100 SNT. -pub async fn get_client_and_funded_wallet(root_dir: &Path) -> Result<(Client, HotWallet)> { - match DeploymentInventory::load() { - Ok(inventory) => { - let client = Droplet::get_client(&inventory).await; - let local_wallet = - Droplet::get_funded_wallet(&client, root_dir, inventory.faucet_address, true) - .await?; - Ok((client, local_wallet)) - } - Err(_) => { - let client = NonDroplet::get_client().await; - let local_wallet = NonDroplet::get_funded_wallet(&client, root_dir, true).await?; - - Ok((client, local_wallet)) - } - } -} - -pub struct NonDroplet; -impl NonDroplet { +pub struct LocalNetwork; +impl LocalNetwork { /// Get a new Client for testing pub async fn get_client() -> Client { - let secret_key = bls::SecretKey::random(); - - let bootstrap_peers = if !cfg!(feature = "local-discovery") { - match std::env::var("SAFE_PEERS") { - Ok(str) => match parse_peer_addr(&str) { - Ok(peer) => Some(vec![peer]), - Err(err) => panic!("Can't parse SAFE_PEERS {str:?} with error {err:?}"), - }, - Err(err) => panic!("Can't get env var SAFE_PEERS with error {err:?}"), - } - } else { - None - }; + let bootstrap_peers = peers_from_env().expect("Failed to get bootstrap peers from env"); println!("Client bootstrap with peer {bootstrap_peers:?}"); info!("Client bootstrap with peer {bootstrap_peers:?}"); - Client::new(secret_key, bootstrap_peers, None, None) + Client::connect(&bootstrap_peers) .await 
.expect("Client shall be successfully created.") } - pub async fn get_funded_wallet( - client: &Client, - root_dir: &Path, - initial_wallet: bool, - ) -> Result { - let wallet_balance = if initial_wallet { - NanoTokens::from(INITIAL_WALLET_BALANCE) - } else { - NanoTokens::from(ADD_FUNDS_TO_WALLET) - }; - let _guard = FAUCET_WALLET_MUTEX.lock().await; - let from_faucet_wallet = NonDroplet::load_faucet_wallet().await?; - let mut local_wallet = get_wallet(root_dir); - - println!("Getting {wallet_balance} tokens from the faucet..."); - info!("Getting {wallet_balance} tokens from the faucet..."); - let tokens = send( - from_faucet_wallet, - wallet_balance, - local_wallet.address(), - client, - true, - ) - .await?; + fn get_funded_wallet() -> evmlib::wallet::Wallet { + get_funded_wallet() + } - println!("Verifying the transfer from faucet..."); - info!("Verifying the transfer from faucet..."); - client.verify_cashnote(&tokens).await?; - local_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(local_wallet.balance(), wallet_balance); - println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); + /// Transfer tokens from the provided wallet to a newly created wallet + /// Returns the newly created wallet + async fn transfer_to_new_wallet(from: &Wallet, amount: usize) -> Result { + let wallet_balance = from.balance_of_tokens().await?; + let gas_balance = from.balance_of_gas_tokens().await?; - Ok(local_wallet) - } + debug!("Wallet balance: {wallet_balance}, Gas balance: {gas_balance}"); - async fn load_faucet_wallet() -> Result { - info!("Loading faucet..."); - let now = Instant::now(); - for attempt in 1..LOAD_FAUCET_WALLET_RETRIES + 1 { - let faucet_wallet = create_faucet_account_and_wallet(); - - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - info!("Loaded faucet wallet after {:?}", now.elapsed()); - return 
Ok(faucet_wallet); - } - tokio::time::sleep(Duration::from_secs(1)).await; - warn!("The faucet wallet is empty. Attempts: {attempt}/{LOAD_FAUCET_WALLET_RETRIES}") - } - bail!("The faucet wallet is empty even after {LOAD_FAUCET_WALLET_RETRIES} retries. Bailing after {:?}. Check the faucet_server logs.", now.elapsed()); + let new_wallet = get_new_wallet()?; + + from.transfer_tokens(new_wallet.address(), Amount::from(amount)) + .await?; + + from.transfer_gas_tokens( + new_wallet.address(), + Amount::from_str("10000000000000000000")?, + ) + .await?; + + Ok(new_wallet) } // Restart a local node by sending in the SafenodeRpcCmd::Restart to the node's RPC endpoint. @@ -268,127 +204,127 @@ impl NonDroplet { } } -pub struct Droplet; -impl Droplet { - /// Create a new client and bootstrap from the provided safe_peers - pub async fn get_client(inventory: &DeploymentInventory) -> Client { - let secret_key = bls::SecretKey::random(); - - let mut bootstrap_peers = Vec::new(); - for peer in inventory - .peers - .iter() - .chain(vec![&inventory.genesis_multiaddr]) - { - match parse_peer_addr(peer) { - Ok(peer) => bootstrap_peers.push(peer), - Err(err) => error!("Can't parse SAFE_PEERS {peer:?} with error {err:?}"), - } - } - if bootstrap_peers.is_empty() { - panic!("Could parse/find any bootstrap peers"); - } - - println!("Client bootstrap with peer {bootstrap_peers:?}"); - info!("Client bootstrap with peer {bootstrap_peers:?}"); - Client::new(secret_key, Some(bootstrap_peers), None, None) - .await - .expect("Client shall be successfully created.") - } - - // Create a wallet at root_dir and fetch the amount from the faucet url - async fn get_funded_wallet( - client: &Client, - root_dir: &Path, - faucet_socket: String, - initial_wallet: bool, - ) -> Result { - let _guard = FAUCET_WALLET_MUTEX.lock().await; - - let requests_to_faucet = if initial_wallet { - let requests_to_faucet = 3; - assert_eq!( - requests_to_faucet * 100 * 1_000_000_000, - INITIAL_WALLET_BALANCE - ); - 
requests_to_faucet - } else { - let requests_to_faucet = 1; - assert_eq!( - requests_to_faucet * 100 * 1_000_000_000, - ADD_FUNDS_TO_WALLET - ); - requests_to_faucet - }; - - let mut local_wallet = get_wallet(root_dir); - let address_hex = hex::encode(local_wallet.address().to_bytes()); - - println!( - "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - info!( - "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - for _ in 0..requests_to_faucet { - let faucet_url = format!("http://{faucet_socket}/{address_hex}"); - - // Get transfer from faucet - let transfer = reqwest::get(&faucet_url).await?.text().await?; - let transfer = match Transfer::from_hex(&transfer) { - Ok(transfer) => transfer, - Err(err) => { - println!("Failed to parse transfer: {err:?}"); - println!("Transfer: \"{transfer}\""); - error!("Failed to parse transfer: {err:?}"); - error!("Transfer: \"{transfer}\""); - return Err(err.into()); - } - }; - let cashnotes = match client.receive(&transfer, &local_wallet).await { - Ok(cashnotes) => cashnotes, - Err(err) => { - println!("Failed to verify and redeem transfer: {err:?}"); - error!("Failed to verify and redeem transfer: {err:?}"); - return Err(err.into()); - } - }; - info!("Successfully verified transfer."); - local_wallet.deposit_and_store_to_disk(&cashnotes)?; - } - println!( - "Successfully got {} after {requests_to_faucet} requests to the faucet", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - info!( - "Successfully got {} after {requests_to_faucet} requests to the faucet", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - - Ok(local_wallet) - } - - // Restart a remote safenode service by sending a RPC to the safenode manager daemon. 
- pub async fn restart_node( - peer_id: &PeerId, - daemon_endpoint: SocketAddr, - retain_peer_id: bool, - ) -> Result<()> { - let mut rpc_client = get_safenode_manager_rpc_client(daemon_endpoint).await?; - - let _response = rpc_client - .restart_node_service(Request::new(NodeServiceRestartRequest { - peer_id: peer_id.to_bytes(), - delay_millis: 0, - retain_peer_id, - })) - .await?; - - println!("Node restart requested to safenodemand {daemon_endpoint}"); - info!("Node restart requested to safenodemand {daemon_endpoint}"); - - Ok(()) - } +pub struct WanNetwork; +impl WanNetwork { + // /// Create a new client and bootstrap from the provided safe_peers + // pub async fn get_client(inventory: &DeploymentInventory) -> Client { + // let secret_key = bls::SecretKey::random(); + + // let mut bootstrap_peers = Vec::new(); + // for peer in inventory + // .peers + // .iter() + // .chain(vec![&inventory.genesis_multiaddr]) + // { + // match parse_peer_addr(peer) { + // Ok(peer) => bootstrap_peers.push(peer), + // Err(err) => error!("Can't parse SAFE_PEERS {peer:?} with error {err:?}"), + // } + // } + // if bootstrap_peers.is_empty() { + // panic!("Could parse/find any bootstrap peers"); + // } + + // println!("Client bootstrap with peer {bootstrap_peers:?}"); + // info!("Client bootstrap with peer {bootstrap_peers:?}"); + // Client::new(secret_key, Some(bootstrap_peers), None, None) + // .await + // .expect("Client shall be successfully created.") + // } + + // // Create a wallet at root_dir and fetch the amount from the faucet url + // async fn get_funded_wallet( + // client: &Client, + // root_dir: &Path, + // faucet_socket: String, + // initial_wallet: bool, + // ) -> Result { + // let _guard = FAUCET_WALLET_MUTEX.lock().await; + + // let requests_to_faucet = if initial_wallet { + // let requests_to_faucet = 3; + // assert_eq!( + // requests_to_faucet * 100 * 1_000_000_000, + // INITIAL_WALLET_BALANCE + // ); + // requests_to_faucet + // } else { + // let 
requests_to_faucet = 1; + // assert_eq!( + // requests_to_faucet * 100 * 1_000_000_000, + // ADD_FUNDS_TO_WALLET + // ); + // requests_to_faucet + // }; + + // let mut local_wallet = get_wallet(root_dir); + // let address_hex = hex::encode(local_wallet.address().to_bytes()); + + // println!( + // "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // info!( + // "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // for _ in 0..requests_to_faucet { + // let faucet_url = format!("http://{faucet_socket}/{address_hex}"); + + // // Get transfer from faucet + // let transfer = reqwest::get(&faucet_url).await?.text().await?; + // let transfer = match Transfer::from_hex(&transfer) { + // Ok(transfer) => transfer, + // Err(err) => { + // println!("Failed to parse transfer: {err:?}"); + // println!("Transfer: \"{transfer}\""); + // error!("Failed to parse transfer: {err:?}"); + // error!("Transfer: \"{transfer}\""); + // return Err(err.into()); + // } + // }; + // let cashnotes = match client.receive(&transfer, &local_wallet).await { + // Ok(cashnotes) => cashnotes, + // Err(err) => { + // println!("Failed to verify and redeem transfer: {err:?}"); + // error!("Failed to verify and redeem transfer: {err:?}"); + // return Err(err.into()); + // } + // }; + // info!("Successfully verified transfer."); + // local_wallet.deposit_and_store_to_disk(&cashnotes)?; + // } + // println!( + // "Successfully got {} after {requests_to_faucet} requests to the faucet", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // info!( + // "Successfully got {} after {requests_to_faucet} requests to the faucet", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + + // Ok(local_wallet) + // } + + // // Restart a remote safenode service by sending a RPC to the safenode manager daemon. 
+ // pub async fn restart_node( + // peer_id: &PeerId, + // daemon_endpoint: SocketAddr, + // retain_peer_id: bool, + // ) -> Result<()> { + // let mut rpc_client = get_safenode_manager_rpc_client(daemon_endpoint).await?; + + // let _response = rpc_client + // .restart_node_service(Request::new(NodeServiceRestartRequest { + // peer_id: peer_id.to_bytes(), + // delay_millis: 0, + // retain_peer_id, + // })) + // .await?; + + // println!("Node restart requested to safenodemand {daemon_endpoint}"); + // info!("Node restart requested to safenodemand {daemon_endpoint}"); + + // Ok(()) + // } } diff --git a/sn_node/tests/common/mod.rs b/sn_node/tests/common/mod.rs index 6366e2092c..fc3a94e97e 100644 --- a/sn_node/tests/common/mod.rs +++ b/sn_node/tests/common/mod.rs @@ -9,67 +9,50 @@ pub mod client; -use self::client::{Droplet, NonDroplet}; -use bytes::Bytes; +use self::client::LocalNetwork; use eyre::{bail, eyre, OptionExt, Result}; use itertools::Either; use libp2p::PeerId; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -use self_encryption::MIN_ENCRYPTABLE_BYTES; -use sn_client::{Client, FilesApi}; -use sn_protocol::{ - safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}, - storage::ChunkAddress, -}; +use sn_protocol::safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}; use sn_service_management::{ get_local_node_registry_path, safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry, }; -use std::{ - fs::File, - io::Write, - net::SocketAddr, - path::{Path, PathBuf}, - time::Duration, -}; +use std::{net::SocketAddr, time::Duration}; use test_utils::testnet::DeploymentInventory; use tonic::Request; use tracing::{debug, error, warn}; -use xor_name::XorName; -type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; +// type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; -pub fn random_content( - client: &Client, - 
wallet_dir: PathBuf, - chunk_dir: &Path, -) -> ResultRandomContent { - let mut rng = rand::thread_rng(); +// pub fn random_content( +// client: &Client, +// wallet_dir: PathBuf, +// chunk_dir: &Path, +// ) -> ResultRandomContent { +// let mut rng = rand::thread_rng(); - let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES); - let random_length_content: Vec = - >::sample_iter(Standard, &mut rng) - .take(random_len) - .collect(); +// let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES); +// let random_length_content: Vec = +// >::sample_iter(Standard, &mut rng) +// .take(random_len) +// .collect(); - let file_path = chunk_dir.join("random_content"); - let mut output_file = File::create(file_path.clone())?; - output_file.write_all(&random_length_content)?; +// let file_path = chunk_dir.join("random_content"); +// let mut output_file = File::create(file_path.clone())?; +// output_file.write_all(&random_length_content)?; - let files_api = FilesApi::new(client.clone(), wallet_dir); - let (head_chunk_address, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, chunk_dir, true)?; +// let files_api = FilesApi::new(client.clone(), wallet_dir); +// let (head_chunk_address, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, chunk_dir, true)?; - Ok(( - files_api, - random_length_content.into(), - head_chunk_address, - chunks, - )) -} +// Ok(( +// files_api, +// random_length_content.into(), +// head_chunk_address, +// chunks, +// )) +// } // Connect to a RPC socket addr with retry pub async fn get_safenode_rpc_client( @@ -237,22 +220,23 @@ impl NodeRestart { ) -> Result<()> { match &self.inventory_file { Either::Left(_inv) => { - match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) - .await - .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { - Ok(_) => { - self.next_to_restart_idx += 1; - }, - Err(err) => { 
- if progress_on_error { - self.next_to_restart_idx += 1; - } - return Err(err); - }, - } + todo!("Not implemented yet for WanNetwork"); + // match WanNetwork::restart_node(&peer_id, endpoint, self.retain_peer_id) + // .await + // .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { + // Ok(_) => { + // self.next_to_restart_idx += 1; + // }, + // Err(err) => { + // if progress_on_error { + // self.next_to_restart_idx += 1; + // } + // return Err(err); + // }, + // } }, Either::Right(_reg) => { - match NonDroplet::restart_node(endpoint, self.retain_peer_id).await + match LocalNetwork::restart_node(endpoint, self.retain_peer_id).await .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { Ok(_) => { self.next_to_restart_idx += 1; diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index baba07c851..347c74dc44 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -8,44 +8,38 @@ mod common; -use crate::common::client::{add_funds_to_wallet, get_client_and_funded_wallet}; -use assert_fs::TempDir; -use common::{ - client::{get_node_count, get_wallet}, +use crate::common::{ + client::{get_client_and_funded_wallet, get_node_count}, NodeRestart, }; -use eyre::{bail, eyre, Result}; -use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; +use autonomi::{Client, Wallet}; +use common::client::transfer_to_new_wallet; +use eyre::{bail, ErrReport, Result}; +use rand::Rng; +use self_encryption::MAX_CHUNK_SIZE; use sn_logging::LogBuilder; -use sn_protocol::{ - storage::{ChunkAddress, RegisterAddress, SpendAddress}, - NetworkAddress, -}; -use sn_registers::Permissions; -use sn_transfers::HotWallet; -use sn_transfers::{CashNote, MainSecretKey, NanoTokens}; +use sn_protocol::{storage::ChunkAddress, NetworkAddress}; use std::{ collections::{BTreeMap, 
VecDeque}, fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, - sync::Arc, + fs::create_dir_all, + sync::{Arc, LazyLock}, time::{Duration, Instant}, }; use tempfile::tempdir; +use test_utils::gen_random_data; use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; use tracing::{debug, error, info, trace, warn}; use xor_name::XorName; +const TOKENS_TO_TRANSFER: usize = 10000000; + const EXTRA_CHURN_COUNT: u32 = 5; const CHURN_CYCLES: u32 = 2; const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; -const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; -const CHUNKS_SIZE: usize = 1024 * 1024; +static DATA_SIZE: LazyLock = LazyLock::new(|| *MAX_CHUNK_SIZE / 3); const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; @@ -55,12 +49,11 @@ const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr type ContentList = Arc>>; -type CashNoteMap = Arc>>; struct ContentError { net_addr: NetworkAddress, attempts: u8, - last_err: Error, + last_err: ErrReport, } impl fmt::Debug for ContentError { @@ -118,64 +111,41 @@ async fn data_availability_during_churn() -> Result<()> { if chunks_only { " (Chunks only)" } else { "" } ); - // The testnet will create a `faucet` at last. To avoid mess up with that, - // wait for a while to ensure the spends of that got settled. - sleep(std::time::Duration::from_secs(10)).await; - - info!("Creating a client and paying wallet..."); - let paying_wallet_dir = TempDir::new()?; - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - // Waiting for the paying_wallet funded. - sleep(std::time::Duration::from_secs(10)).await; + let (client, main_wallet) = get_client_and_funded_wallet().await; info!( - "Client and paying_wallet created with signing key: {:?}", - client.signer_pk() + "Client and wallet created. 
Main wallet address: {:?}", + main_wallet.address() ); // Shared bucket where we keep track of content created/stored on the network let content = ContentList::default(); - // Shared bucket where we keep track of CashNotes created/stored on the network - let cash_notes = CashNoteMap::default(); - // Spawn a task to create Registers and CashNotes at random locations, // at a higher frequency than the churning events - if !chunks_only { - info!("Creating transfer wallet taking balance from the payment wallet"); - let transfers_wallet_dir = TempDir::new()?; - let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; - info!("Transfer wallet created"); - - // Waiting for the transfers_wallet funded. - sleep(std::time::Duration::from_secs(10)).await; - - create_registers_task( + let create_register_handle = if !chunks_only { + let register_wallet = transfer_to_new_wallet(&main_wallet, TOKENS_TO_TRANSFER).await?; + let create_register_handle = create_registers_task( client.clone(), + register_wallet, Arc::clone(&content), churn_period, - paying_wallet_dir.path().to_path_buf(), ); - - create_cash_note_task( - client.clone(), - transfers_wallet, - Arc::clone(&content), - Arc::clone(&cash_notes), - churn_period, - ); - } + Some(create_register_handle) + } else { + None + }; println!("Uploading some chunks before carry out node churning"); info!("Uploading some chunks before carry out node churning"); + let chunk_wallet = transfer_to_new_wallet(&main_wallet, TOKENS_TO_TRANSFER).await?; // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events - store_chunks_task( + let store_chunks_handle = store_chunks_task( client.clone(), + chunk_wallet, Arc::clone(&content), churn_period, - paying_wallet_dir.path().to_path_buf(), ); // Spawn a task to churn nodes @@ -194,9 +164,7 @@ async fn data_availability_during_churn() -> Result<()> { client.clone(), Arc::clone(&content), Arc::clone(&content_erred), - 
Arc::clone(&cash_notes), churn_period, - paying_wallet_dir.path().to_path_buf(), ); // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, @@ -205,9 +173,7 @@ async fn data_availability_during_churn() -> Result<()> { client.clone(), Arc::clone(&content_erred), Arc::clone(&failures), - Arc::clone(&cash_notes), churn_period, - paying_wallet_dir.path().to_path_buf(), ); info!("All tasks have been spawned. The test is now running..."); @@ -215,14 +181,32 @@ async fn data_availability_during_churn() -> Result<()> { let start_time = Instant::now(); while start_time.elapsed() < test_duration { + if store_chunks_handle.is_finished() { + bail!("Store chunks task has finished before the test duration. Probably due to an error."); + } + if let Some(handle) = &create_register_handle { + if handle.is_finished() { + bail!("Create registers task has finished before the test duration. Probably due to an error."); + } + } + let failed = failures.read().await; - info!( - "Current failures after {:?} ({}): {:?}", - start_time.elapsed(), - failed.len(), - failed.values() - ); - sleep(churn_period).await; + if start_time.elapsed().as_secs() % 10 == 0 { + println!( + "Current failures after {:?} ({}): {:?}", + start_time.elapsed(), + failed.len(), + failed.values() + ); + info!( + "Current failures after {:?} ({}): {:?}", + start_time.elapsed(), + failed.len(), + failed.values() + ); + } + + sleep(Duration::from_secs(3)).await; } println!(); @@ -249,20 +233,10 @@ async fn data_availability_during_churn() -> Result<()> { for net_addr in content.iter() { let client = client.clone(); let net_addr = net_addr.clone(); - let cash_notes = Arc::clone(&cash_notes); let failures = Arc::clone(&failures); - let wallet_dir = paying_wallet_dir.to_path_buf().clone(); let handle = tokio::spawn(async move { - final_retry_query_content( - &client, - &net_addr, - cash_notes, - churn_period, - failures, - &wallet_dir, - ) - .await + 
final_retry_query_content(&client, &net_addr, churn_period, failures).await }); handles.push(handle); } @@ -290,85 +264,64 @@ async fn data_availability_during_churn() -> Result<()> { Ok(()) } -// Spawns a task which periodically creates CashNotes at random locations. -fn create_cash_note_task( - client: Client, - transfers_wallet: HotWallet, - content: ContentList, - cash_notes: CashNoteMap, - churn_period: Duration, -) { - let _handle = tokio::spawn(async move { - // Create CashNote at a higher frequency than the churning events - let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; - - let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); - - loop { - sleep(delay).await; - - let dest_pk = MainSecretKey::random().main_pubkey(); - let cash_note = wallet_client - .send_cash_note(NanoTokens::from(10), dest_pk, true) - .await - .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); - - let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - let net_addr = NetworkAddress::SpendAddress(cash_note_addr); - println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - content.write().await.push_back(net_addr); - let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); - } - }); -} - // Spawns a task which periodically creates Registers at random locations. 
fn create_registers_task( client: Client, + wallet: Wallet, content: ContentList, churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { +) -> JoinHandle> { + let handle: JoinHandle> = tokio::spawn(async move { // Create Registers at a higher frequency than the churning events let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; - let paying_wallet = get_wallet(&paying_wallet_dir); - - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - loop { - let meta = XorName(rand::random()); - let owner = client.signer_pk(); + let owner = Client::register_generate_key(); + let random_name = XorName(rand::random()).to_string(); + let random_data = gen_random_data(*DATA_SIZE); - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?} in {delay:?}"); - debug!("Creating Register at {addr:?} in {delay:?}"); sleep(delay).await; - match client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) - .await - { - Ok(_) => content - .write() + let mut retries = 1; + loop { + match client + .register_create(random_data.clone(), &random_name, owner.clone(), &wallet) .await - .push_back(NetworkAddress::RegisterAddress(addr)), - Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), + { + Ok(register) => { + let addr = register.address(); + println!("Created new Register ({addr:?}) after a delay of: {delay:?}"); + content + .write() + .await + .push_back(NetworkAddress::RegisterAddress(*addr)); + break; + } + Err(err) => { + println!("Failed to create register: {err:?}. Retrying ..."); + error!("Failed to create register: {err:?}. 
Retrying ..."); + if retries >= 3 { + println!("Failed to create register after 3 retries: {err}"); + error!("Failed to create register after 3 retries: {err}"); + bail!("Failed to create register after 3 retries: {err}"); + } + retries += 1; + } + } } } }); + handle } // Spawns a task which periodically stores Chunks at random locations. fn store_chunks_task( client: Client, + wallet: Wallet, content: ContentList, churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle: JoinHandle> = tokio::spawn(async move { +) -> JoinHandle> { + let handle: JoinHandle> = tokio::spawn(async move { let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); let output_dir = temp_dir.path().join("chunk_path"); create_dir_all(output_dir.clone()) @@ -377,67 +330,47 @@ fn store_chunks_task( // Store Chunks at a higher frequency than the churning events let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; - let mut rng = OsRng; - loop { - let random_bytes: Vec = ::std::iter::repeat(()) - .map(|()| rng.gen::()) - .take(CHUNKS_SIZE) - .collect(); - let chunk_size = random_bytes.len(); - - let chunk_name = XorName::from_content(&random_bytes); - - let file_path = temp_dir.path().join(hex::encode(chunk_name)); - let mut chunk_file = - File::create(&file_path).expect("failed to create temp chunk file"); - chunk_file - .write_all(&random_bytes) - .expect("failed to write to temp chunk file"); - - let (addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); - - info!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", - chunks.len(), - chunk_size - ); - sleep(delay).await; - - let chunks_len = chunks.len(); - let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); - - let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.insert_chunk_paths(chunks); - - let 
cost = match uploader.start_upload().await { - Ok(stats) => stats - .royalty_fees - .checked_add(stats.storage_cost) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?, - Err(err) => { - bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); + let random_data = gen_random_data(*DATA_SIZE); + + // FIXME: The client does not have the retry repay to different payee feature yet. + // Retry here for now + let mut retries = 1; + loop { + match client + .data_put(random_data.clone(), &wallet) + .await + .inspect_err(|err| { + println!("Error to put chunk: {err:?}"); + error!("Error to put chunk: {err:?}") + }) { + Ok(data_map) => { + println!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); + info!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); + + content + .write() + .await + .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(data_map))); + break; + } + Err(err) => { + println!("Failed to store chunk: {err:?}. Retrying ..."); + error!("Failed to store chunk: {err:?}. 
Retrying ..."); + if retries >= 3 { + println!("Failed to store chunk after 3 retries: {err}"); + error!("Failed to store chunk after 3 retries: {err}"); + bail!("Failed to store chunk after 3 retries: {err}"); + } + retries += 1; + } } - }; + } - println!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" - ); - info!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" - ); sleep(delay).await; - - for chunk_name in chunks_name { - content - .write() - .await - .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name))); - } } }); + handle } // Spawns a task which periodically queries a content by randomly choosing it from the list @@ -446,9 +379,7 @@ fn query_content_task( client: Client, content: ContentList, content_erred: ContentErredList, - cash_notes: CashNoteMap, churn_period: Duration, - root_dir: PathBuf, ) { let _handle = tokio::spawn(async move { let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN; @@ -467,7 +398,7 @@ fn query_content_task( trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}"); sleep(delay).await; - match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await { + match query_content(&client, &net_addr).await { Ok(_) => { let _ = content_erred.write().await.remove(&net_addr); } @@ -530,9 +461,7 @@ fn retry_query_content_task( client: Client, content_erred: ContentErredList, failures: ContentErredList, - cash_notes: CashNoteMap, churn_period: Duration, - wallet_dir: PathBuf, ) { let _handle = tokio::spawn(async move { let delay = 2 * churn_period; @@ -547,9 +476,7 @@ fn retry_query_content_task( println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { + if let 
Err(last_err) = query_content(&client, &net_addr).await { println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, @@ -575,19 +502,15 @@ fn retry_query_content_task( async fn final_retry_query_content( client: &Client, net_addr: &NetworkAddress, - cash_notes: CashNoteMap, churn_period: Duration, failures: ContentErredList, - wallet_dir: &Path, ) -> Result<()> { let mut attempts = 1; let net_addr = net_addr.clone(); loop { println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { + if let Err(last_err) = query_content(client, &net_addr).await { if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); @@ -607,36 +530,14 @@ async fn final_retry_query_content( } } -async fn query_content( - client: &Client, - wallet_dir: &Path, - net_addr: &NetworkAddress, - cash_notes: CashNoteMap, -) -> Result<(), Error> { +async fn query_content(client: &Client, net_addr: &NetworkAddress) -> Result<()> { match net_addr { - NetworkAddress::SpendAddress(addr) => { - if let Some(cash_note) = cash_notes.read().await.get(addr) { - match client.verify_cashnote(cash_note).await { - Ok(_) => Ok(()), - Err(err) => Err(Error::CouldNotVerifyTransfer(format!( - "Verification of cash_note {addr:?} failed with error: {err:?}" - ))), - } - } else { - Err(Error::CouldNotVerifyTransfer(format!( - "Do not have the CashNote: {addr:?}" - ))) - } - } NetworkAddress::RegisterAddress(addr) => 
{ - let _ = client.get_register(*addr).await?; + let _ = client.register_get(*addr).await?; Ok(()) } NetworkAddress::ChunkAddress(addr) => { - let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); - let mut file_download = FilesDownload::new(files_api); - let _ = file_download.download_file(*addr, None).await?; - + client.data_get(*addr.xorname()).await?; Ok(()) } _other => Ok(()), // we don't create/store any other type of content in this test yet diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 1352a24659..8d06a87187 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -1,683 +1,683 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod common; - -use assert_fs::TempDir; -use assert_matches::assert_matches; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::{bail, Result}; -use itertools::Itertools; -use sn_logging::LogBuilder; -use sn_networking::NetworkError; -use sn_transfers::{ - get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, - WalletError, GENESIS_CASHNOTE, -}; -use std::time::Duration; -use tracing::*; - -#[tokio::test] -async fn cash_note_transfer_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - // create 1 wallet add money from faucet - let first_wallet_dir = TempDir::new()?; - - let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - // create wallet 2 and 3 to receive money from 1 - let second_wallet_dir = TempDir::new()?; - let second_wallet = get_wallet(second_wallet_dir.path()); - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - let third_wallet_dir = TempDir::new()?; - let third_wallet = get_wallet(third_wallet_dir.path()); - assert_eq!(third_wallet.balance(), NanoTokens::zero()); - - // manually forge two transfers of the same source - let amount = NanoTokens::from(first_wallet_balance / 3); - let to1 = first_wallet.address(); - let to2 = second_wallet.address(); - let to3 = third_wallet.address(); - - let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; - let same_cash_notes = some_cash_notes.clone(); - - let mut rng = rng::thread_rng(); - - let reason = SpendReason::default(); - let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); - - let transfer_to_2 = SignedTransaction::new( - some_cash_notes, - vec![to2_unique_key], - to1, - reason.clone(), - first_wallet.key(), - )?; - let 
transfer_to_3 = SignedTransaction::new( - same_cash_notes, - vec![to3_unique_key], - to1, - reason, - first_wallet.key(), - )?; - - // send both transfers to the network - // upload won't error out, only error out during verification. - info!("Sending both transfers to the network..."); - let res = client.send_spends(transfer_to_2.spends.iter(), false).await; - assert!(res.is_ok()); - let res = client.send_spends(transfer_to_3.spends.iter(), false).await; - assert!(res.is_ok()); - - // we wait 5s to ensure that the double spend attempt is detected and accumulated - info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); - tokio::time::sleep(Duration::from_secs(10)).await; - - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - - // check the CashNotes, it should fail - let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; - let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; - info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); - assert!(should_err1.is_err() && should_err2.is_err()); - assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - Ok(()) -} - -#[tokio::test] -async fn genesis_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - - // create a client and an unused wallet to make sure some money already exists in the system - let first_wallet_dir = TempDir::new()?; - let (client, mut first_wallet) = 
get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_addr = first_wallet.address(); - - // create a new genesis wallet with the intention to spend genesis again - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; - second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; - let genesis_amount = GENESIS_CASHNOTE.value(); - let second_wallet_addr = second_wallet.address(); - - // create a transfer from the second wallet to the first wallet - // this will spend Genesis (again) and transfer its value to the first wallet - let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; - let mut rng = rng::thread_rng(); - let recipient = ( - genesis_amount, - first_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let change_addr = second_wallet_addr; - let reason = SpendReason::default(); - let transfer = SignedTransaction::new( - genesis_cashnote, - vec![recipient], - change_addr, - reason, - second_wallet.key(), - )?; - - // send the transfer to the network which will mark genesis as a double spent - // making its direct descendants unspendable - let res = client.send_spends(transfer.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert!(res.is_ok()); - - // put the bad cashnote in the first wallet - first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; - - // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) - let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; - let recipient = ( - genesis_amount, - second_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let bad_genesis_descendant = genesis_cashnote_and_others - .iter() - .find(|cn| cn.value() == genesis_amount) - .unwrap() - .clone(); - let change_addr = first_wallet_addr; - let reason = SpendReason::default(); - let 
transfer2 = SignedTransaction::new( - vec![bad_genesis_descendant], - vec![recipient], - change_addr, - reason, - first_wallet.key(), - )?; - - // send the transfer to the network which should reject it - let res = client.send_spends(transfer2.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); - - Ok(()) -} - -#[tokio::test] -async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_1 = TempDir::new()?; - - let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; - let balance_1 = wallet_1.balance().as_nano(); - let amount = NanoTokens::from(balance_1 / 2); - let to1 = wallet_1.address(); - - // Send from 1 -> 2 - let wallet_dir_2 = TempDir::new()?; - let mut wallet_2 = get_wallet(wallet_dir_2.path()); - assert_eq!(wallet_2.balance(), NanoTokens::zero()); - - let to2 = wallet_2.address(); - let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; - let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let transfer_to_2 = SignedTransaction::new( - cash_notes_1.clone(), - vec![to_2_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; - - info!("Sending 1->2 to the network..."); - client - .send_spends(transfer_to_2.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 1 -> 2 wallet..."); - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_2[0]).await?; - wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 - - // Send from 2 -> 22 - let wallet_dir_22 = TempDir::new()?; - let mut wallet_22 = get_wallet(wallet_dir_22.path()); - assert_eq!(wallet_22.balance(), 
NanoTokens::zero()); - - let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; - assert!(!cash_notes_2.is_empty()); - let to_22_unique_key = ( - wallet_2.balance(), - wallet_22.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_22 = SignedTransaction::new( - cash_notes_2, - vec![to_22_unique_key], - to2, - reason.clone(), - wallet_2.key(), - )?; - - client - .send_spends(transfer_to_22.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 2 -> 22 wallet..."); - let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_22[0]).await?; - wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 - - // Try to double spend from 1 -> 3 - let wallet_dir_3 = TempDir::new()?; - let wallet_3 = get_wallet(wallet_dir_3.path()); - assert_eq!(wallet_3.balance(), NanoTokens::zero()); - - let to_3_unique_key = ( - amount, - wallet_3.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_3 = SignedTransaction::new( - cash_notes_1, - vec![to_3_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_3.spends.iter(), false) - .await?; - info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned - info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); - assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned - - // The old spend has been poisoned, but spends from 22 -> 222 should still work - let wallet_dir_222 = TempDir::new()?; - let wallet_222 = get_wallet(wallet_dir_222.path()); - assert_eq!(wallet_222.balance(), NanoTokens::zero()); - - let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; - assert!(!cash_notes_22.is_empty()); - let to_222_unique_key = ( - wallet_22.balance(), - wallet_222.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_222 = SignedTransaction::new( - cash_notes_22, - vec![to_222_unique_key], - wallet_22.address(), - reason, - wallet_22.key(), - )?; - client - .send_spends(transfer_to_222.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 22 -> 222 wallet..."); - let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_222[0]).await?; - - // finally assert that we have a double spend attempt error here - // we wait 1s to ensure that the double spend attempt is detected and accumulated - tokio::time::sleep(Duration::from_secs(5)).await; - - match client.verify_cashnote(&cash_notes_for_2[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - - match client.verify_cashnote(&cash_notes_for_3[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - Ok(()) -} - -#[tokio::test] -/// When A -> B -> C where C is the UTXO cashnote, then double spending A and then double spending B should lead to C -/// being invalid. 
-async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_a = TempDir::new()?; - - let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; - let balance_a = wallet_a.balance().as_nano(); - let amount = NanoTokens::from(balance_a / 2); - - // Send from A -> B - let wallet_dir_b = TempDir::new()?; - let mut wallet_b = get_wallet(wallet_dir_b.path()); - assert_eq!(wallet_b.balance(), NanoTokens::zero()); - - let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; - let to_b_unique_key = ( - amount, - wallet_b.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_b = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_b_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; - - info!("Sending A->B to the network..."); - client - .send_spends(transfer_to_b.spends.iter(), false) - .await?; - - info!("Verifying the transfers from A -> B wallet..."); - let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_b[0]).await?; - wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B - - // Send from B -> C - let wallet_dir_c = TempDir::new()?; - let mut wallet_c = get_wallet(wallet_dir_c.path()); - assert_eq!(wallet_c.balance(), NanoTokens::zero()); - - let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; - assert!(!cash_notes_b.is_empty()); - let to_c_unique_key = ( - wallet_b.balance(), - wallet_c.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_c = SignedTransaction::new( - cash_notes_b.clone(), - vec![to_c_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; - - 
info!("spend B to C: {:?}", transfer_to_c.spends); - client - .send_spends(transfer_to_c.spends.iter(), false) - .await?; - - info!("Verifying the transfers from B -> C wallet..."); - let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_c[0]).await?; - wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c - - // Try to double spend from A -> X - let wallet_dir_x = TempDir::new()?; - let wallet_x = get_wallet(wallet_dir_x.path()); - assert_eq!(wallet_x.balance(), NanoTokens::zero()); - - let to_x_unique_key = ( - amount, - wallet_x.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_x = SignedTransaction::new( - cash_notes_a, - vec![to_x_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_x.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> X wallet... It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(10)).await; - - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); // poisoned - - // Try to double spend from B -> Y - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_b, - vec![to_y_unique_key], - wallet_b.address(), - 
reason.clone(), - wallet_b.key(), - )?; // reuse the old cash notes - - info!("spend B to Y: {:?}", transfer_to_y.spends); - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); - let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; - info!("B spends: {b_spends:?}"); - - info!("Verifying the transfers from B -> Y wallet... It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(30)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from B -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of A -> B"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - info!("Got result while verifying the original spend from A -> B: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of B -> C"); - let result = client.verify_cashnote(&cash_notes_for_c[0]).await; - info!("Got result while verifying the original spend from B -> C: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - assert_matches!(result, 
Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - - Ok(()) -} - -#[tokio::test] -/// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over -/// should not lead to the original A disappearing and B becoming orphan -async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_a = TempDir::new()?; - - let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; - let balance_a = wallet_a.balance().as_nano(); - let amount = NanoTokens::from(balance_a / 2); - - // Send from A -> B - let wallet_dir_b = TempDir::new()?; - let mut wallet_b = get_wallet(wallet_dir_b.path()); - assert_eq!(wallet_b.balance(), NanoTokens::zero()); - - let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; - let to_b_unique_key = ( - amount, - wallet_b.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_b = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_b_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; - - info!("Sending A->B to the network..."); - client - .send_spends(transfer_to_b.spends.iter(), false) - .await?; - - // save original A spend - let vec_of_spends = transfer_to_b.spends.into_iter().collect::>(); - 
let original_a_spend = if let [spend] = vec_of_spends.as_slice() { - spend - } else { - panic!("Expected to have one spend here!"); - }; - - info!("Verifying the transfers from A -> B wallet..."); - let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_b[0]).await?; - wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B - - // Send from B -> C - let wallet_dir_c = TempDir::new()?; - let mut wallet_c = get_wallet(wallet_dir_c.path()); - assert_eq!(wallet_c.balance(), NanoTokens::zero()); - - let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; - assert!(!cash_notes_b.is_empty()); - let to_c_unique_key = ( - wallet_b.balance(), - wallet_c.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_c = SignedTransaction::new( - cash_notes_b.clone(), - vec![to_c_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; - - client - .send_spends(transfer_to_c.spends.iter(), false) - .await?; - - info!("Verifying the transfers from B -> C wallet..."); - let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_c[0]).await?; - wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c - - // Try to double spend from A -> X - let wallet_dir_x = TempDir::new()?; - let wallet_x = get_wallet(wallet_dir_x.path()); - assert_eq!(wallet_x.balance(), NanoTokens::zero()); - - let to_x_unique_key = ( - amount, - wallet_x.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_x = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_x_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_x.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> X wallet... 
It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(15)).await; - - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - - // Try to double spend A -> n different random keys - for _ in 0..20 { - info!("Spamming double spends on A"); - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_y_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> Y wallet... 
It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_millis(500)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from A -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - } - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use assert_fs::TempDir; +// use assert_matches::assert_matches; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::{bail, Result}; +// use itertools::Itertools; +// use sn_transfers::{ +// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, +// SpendReason, WalletError, GENESIS_CASHNOTE, +// }; +// use sn_logging::LogBuilder; +// use sn_networking::NetworkError; +// use std::time::Duration; +// use tracing::*; + +// #[tokio::test] +// async fn cash_note_transfer_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// // create 1 wallet add money from faucet +// let first_wallet_dir = TempDir::new()?; + +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance = first_wallet.balance().as_nano(); + +// // create wallet 2 and 3 to receive money from 1 +// let second_wallet_dir = TempDir::new()?; +// let second_wallet = get_wallet(second_wallet_dir.path()); +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); +// let third_wallet_dir = TempDir::new()?; +// let third_wallet = get_wallet(third_wallet_dir.path()); +// assert_eq!(third_wallet.balance(), NanoTokens::zero()); + +// // manually forge two transfers of the same source +// let amount = first_wallet_balance / 3; +// let to1 = first_wallet.address(); +// let to2 = second_wallet.address(); +// let to3 = third_wallet.address(); + +// let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; +// let same_cash_notes = some_cash_notes.clone(); + +// let mut rng = rng::thread_rng(); + +// let reason = SpendReason::default(); +// let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); + +// let transfer_to_2 = SignedTransaction::new( +// some_cash_notes, +// 
vec![to2_unique_key], +// to1, +// reason.clone(), +// first_wallet.key(), +// )?; +// let transfer_to_3 = SignedTransaction::new( +// same_cash_notes, +// vec![to3_unique_key], +// to1, +// reason, +// first_wallet.key(), +// )?; + +// // send both transfers to the network +// // upload won't error out, only error out during verification. +// info!("Sending both transfers to the network..."); +// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; +// assert!(res.is_ok()); +// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; +// assert!(res.is_ok()); + +// // we wait 5s to ensure that the double spend attempt is detected and accumulated +// info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); +// tokio::time::sleep(Duration::from_secs(10)).await; + +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); + +// // check the CashNotes, it should fail +// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// assert!(should_err1.is_err() && should_err2.is_err()); +// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); +// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// Ok(()) +// } + +// #[tokio::test] +// async fn genesis_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); + +// // create a client and an unused wallet to make 
sure some money already exists in the system +// let first_wallet_dir = TempDir::new()?; +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_addr = first_wallet.address(); + +// // create a new genesis wallet with the intention to spend genesis again +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; +// second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; +// let genesis_amount = GENESIS_CASHNOTE.value(); +// let second_wallet_addr = second_wallet.address(); + +// // create a transfer from the second wallet to the first wallet +// // this will spend Genesis (again) and transfer its value to the first wallet +// let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; +// let mut rng = rng::thread_rng(); +// let recipient = ( +// genesis_amount, +// first_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let change_addr = second_wallet_addr; +// let reason = SpendReason::default(); +// let transfer = SignedTransaction::new( +// genesis_cashnote, +// vec![recipient], +// change_addr, +// reason, +// second_wallet.key(), +// )?; + +// // send the transfer to the network which will mark genesis as a double spent +// // making its direct descendants unspendable +// let res = client.send_spends(transfer.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert!(res.is_ok()); + +// // put the bad cashnote in the first wallet +// first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; + +// // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) +// let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; +// let recipient = ( +// genesis_amount, +// second_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let 
bad_genesis_descendant = genesis_cashnote_and_others +// .iter() +// .find(|cn| cn.value() == genesis_amount) +// .unwrap() +// .clone(); +// let change_addr = first_wallet_addr; +// let reason = SpendReason::default(); +// let transfer2 = SignedTransaction::new( +// vec![bad_genesis_descendant], +// vec![recipient], +// change_addr, +// reason, +// first_wallet.key(), +// )?; + +// // send the transfer to the network which should reject it +// let res = client.send_spends(transfer2.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); + +// Ok(()) +// } + +// #[tokio::test] +// async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_1 = TempDir::new()?; + +// let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; +// let balance_1 = wallet_1.balance(); +// let amount = balance_1 / 2; +// let to1 = wallet_1.address(); + +// // Send from 1 -> 2 +// let wallet_dir_2 = TempDir::new()?; +// let mut wallet_2 = get_wallet(wallet_dir_2.path()); +// assert_eq!(wallet_2.balance(), NanoTokens::zero()); + +// let to2 = wallet_2.address(); +// let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; +// let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let transfer_to_2 = SignedTransaction::new( +// cash_notes_1.clone(), +// vec![to_2_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; + +// info!("Sending 1->2 to the network..."); +// client +// .send_spends(transfer_to_2.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 1 -> 2 wallet..."); +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// 
client.verify_cashnote(&cash_notes_for_2[0]).await?; +// wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 + +// // Send from 2 -> 22 +// let wallet_dir_22 = TempDir::new()?; +// let mut wallet_22 = get_wallet(wallet_dir_22.path()); +// assert_eq!(wallet_22.balance(), NanoTokens::zero()); + +// let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; +// assert!(!cash_notes_2.is_empty()); +// let to_22_unique_key = ( +// wallet_2.balance(), +// wallet_22.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_22 = SignedTransaction::new( +// cash_notes_2, +// vec![to_22_unique_key], +// to2, +// reason.clone(), +// wallet_2.key(), +// )?; + +// client +// .send_spends(transfer_to_22.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 2 -> 22 wallet..."); +// let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_22[0]).await?; +// wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 + +// // Try to double spend from 1 -> 3 +// let wallet_dir_3 = TempDir::new()?; +// let wallet_3 = get_wallet(wallet_dir_3.path()); +// assert_eq!(wallet_3.balance(), NanoTokens::zero()); + +// let to_3_unique_key = ( +// amount, +// wallet_3.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_3 = SignedTransaction::new( +// cash_notes_1, +// vec![to_3_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_3.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); +// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned +// info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); +// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// // The old spend has been poisoned, but spends from 22 -> 222 should still work +// let wallet_dir_222 = TempDir::new()?; +// let wallet_222 = get_wallet(wallet_dir_222.path()); +// assert_eq!(wallet_222.balance(), NanoTokens::zero()); + +// let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; +// assert!(!cash_notes_22.is_empty()); +// let to_222_unique_key = ( +// wallet_22.balance(), +// wallet_222.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_222 = SignedTransaction::new( +// cash_notes_22, +// vec![to_222_unique_key], +// wallet_22.address(), +// reason, +// wallet_22.key(), +// )?; +// client +// .send_spends(transfer_to_222.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 22 -> 222 wallet..."); +// let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_222[0]).await?; + +// // finally assert that we have a double spend attempt error here +// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// tokio::time::sleep(Duration::from_secs(5)).await; + +// match client.verify_cashnote(&cash_notes_for_2[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } + +// match client.verify_cashnote(&cash_notes_for_3[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } +// Ok(()) +// } + +// #[tokio::test] +// /// When A -> B -> C where C is the 
UTXO cashnote, then double spending A and then double spending B should lead to C +// /// being invalid. +// async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_a = TempDir::new()?; + +// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = balance_a / 2; + +// // Send from A -> B +// let wallet_dir_b = TempDir::new()?; +// let mut wallet_b = get_wallet(wallet_dir_b.path()); +// assert_eq!(wallet_b.balance(), NanoTokens::zero()); + +// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; +// let to_b_unique_key = ( +// amount, +// wallet_b.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_b = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_b_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; + +// info!("Sending A->B to the network..."); +// client +// .send_spends(transfer_to_b.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from A -> B wallet..."); +// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_b[0]).await?; +// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + +// // Send from B -> C +// let wallet_dir_c = TempDir::new()?; +// let mut wallet_c = get_wallet(wallet_dir_c.path()); +// assert_eq!(wallet_c.balance(), NanoTokens::zero()); + +// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; +// assert!(!cash_notes_b.is_empty()); +// let to_c_unique_key = ( +// wallet_b.balance(), +// wallet_c.address(), +// DerivationIndex::random(&mut rng), +// 
false, +// ); +// let transfer_to_c = SignedTransaction::new( +// cash_notes_b.clone(), +// vec![to_c_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; + +// info!("spend B to C: {:?}", transfer_to_c.spends); +// client +// .send_spends(transfer_to_c.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from B -> C wallet..."); +// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_c[0]).await?; +// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + +// // Try to double spend from A -> X +// let wallet_dir_x = TempDir::new()?; +// let wallet_x = get_wallet(wallet_dir_x.path()); +// assert_eq!(wallet_x.balance(), NanoTokens::zero()); + +// let to_x_unique_key = ( +// amount, +// wallet_x.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_x = SignedTransaction::new( +// cash_notes_a, +// vec![to_x_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... 
It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(10)).await; + +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // poisoned + +// // Try to double spend from B -> Y +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_b, +// vec![to_y_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; // reuse the old cash notes + +// info!("spend B to Y: {:?}", transfer_to_y.spends); +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); +// let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; +// info!("B spends: {b_spends:?}"); + +// info!("Verifying the transfers from B -> Y wallet... 
It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(30)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from B -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of A -> B"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// info!("Got result while verifying the original spend from A -> B: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of B -> C"); +// let result = client.verify_cashnote(&cash_notes_for_c[0]).await; +// info!("Got result while verifying the original spend from B -> C: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// 
assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); + +// Ok(()) +// } + +// #[tokio::test] +// /// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over +// /// should not lead to the original A disappearing and B becoming orphan +// async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_a = TempDir::new()?; + +// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; +// let balance_a = wallet_a.balance(); +// let amount = balance_a / 2; + +// // Send from A -> B +// let wallet_dir_b = TempDir::new()?; +// let mut wallet_b = get_wallet(wallet_dir_b.path()); +// assert_eq!(wallet_b.balance(), NanoTokens::zero()); + +// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; +// let to_b_unique_key = ( +// amount, +// wallet_b.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_b = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_b_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; + +// info!("Sending A->B to the network..."); +// client +// .send_spends(transfer_to_b.spends.iter(), false) +// .await?; + +// // save original A spend +// let vec_of_spends = transfer_to_b.spends.into_iter().collect::>(); +// let original_a_spend = if let [spend] = vec_of_spends.as_slice() { +// spend +// } else { +// panic!("Expected to have one spend here!"); +// }; + +// info!("Verifying the transfers from A -> B wallet..."); +// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); +// 
client.verify_cashnote(&cash_notes_for_b[0]).await?; +// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + +// // Send from B -> C +// let wallet_dir_c = TempDir::new()?; +// let mut wallet_c = get_wallet(wallet_dir_c.path()); +// assert_eq!(wallet_c.balance(), NanoTokens::zero()); + +// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; +// assert!(!cash_notes_b.is_empty()); +// let to_c_unique_key = ( +// wallet_b.balance(), +// wallet_c.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_c = SignedTransaction::new( +// cash_notes_b.clone(), +// vec![to_c_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; + +// client +// .send_spends(transfer_to_c.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from B -> C wallet..."); +// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_c[0]).await?; +// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + +// // Try to double spend from A -> X +// let wallet_dir_x = TempDir::new()?; +// let wallet_x = get_wallet(wallet_dir_x.path()); +// assert_eq!(wallet_x.balance(), NanoTokens::zero()); + +// let to_x_unique_key = ( +// amount, +// wallet_x.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_x = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_x_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... 
It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(15)).await; + +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } + +// // Try to double spend A -> n different random keys +// for _ in 0..20 { +// info!("Spamming double spends on A"); +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_y_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> Y wallet... 
It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_millis(500)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from A -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } +// } + +// Ok(()) +// } diff --git a/sn_node/tests/sequential_transfers.rs b/sn_node/tests/sequential_transfers.rs index 66d69337c8..d6906e37d1 100644 --- a/sn_node/tests/sequential_transfers.rs +++ b/sn_node/tests/sequential_transfers.rs @@ -1,54 +1,54 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod common; - -use assert_fs::TempDir; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::Result; -use sn_client::send; -use sn_logging::LogBuilder; -use sn_transfers::NanoTokens; -use tracing::info; - -#[tokio::test] -async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); - - let first_wallet_dir = TempDir::new()?; - - let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - let second_wallet_balance = NanoTokens::from(first_wallet_balance / 2); - info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = get_wallet(second_wallet_dir.path()); - - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - - let tokens = send( - first_wallet, - second_wallet_balance, - second_wallet.address(), - &client, - true, - ) - .await?; - info!("Verifying the transfer from first wallet..."); - - client.verify_cashnote(&tokens).await?; - second_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(second_wallet.balance(), second_wallet_balance); - info!("CashNotes deposited to second wallet: {second_wallet_balance}."); - - let first_wallet = get_wallet(&first_wallet_dir); - assert!(second_wallet_balance.as_nano() == first_wallet.balance().as_nano()); - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. 
Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// mod common; + +// use assert_fs::TempDir; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::Result; +// use sn_client::send; +// use sn_logging::LogBuilder; +// use sn_transfers::NanoTokens; +// use tracing::info; + +// #[tokio::test] +// async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); + +// let first_wallet_dir = TempDir::new()?; + +// let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance:NanoTokens = first_wallet.balance(); + +// let second_wallet_balance = first_wallet_balance / 2; +// info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = get_wallet(second_wallet_dir.path()); + +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); + +// let tokens = send( +// first_wallet, +// second_wallet_balance, +// second_wallet.address(), +// &client, +// true, +// ) +// .await?; +// info!("Verifying the transfer from first wallet..."); + +// client.verify_cashnote(&tokens).await?; +// second_wallet.deposit_and_store_to_disk(&vec![tokens])?; +// assert_eq!(second_wallet.balance(), second_wallet_balance); +// info!("CashNotes deposited to second wallet: {second_wallet_balance}."); + +// let first_wallet = get_wallet(&first_wallet_dir); +// assert!(second_wallet_balance.as_atto() == first_wallet.balance().as_atto()); + +// Ok(()) +// } diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 57e63f05b6..23fe9c53b0 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -1,399 +1,425 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod common; - -use crate::common::{client::get_client_and_funded_wallet, random_content}; -use assert_fs::TempDir; -use eyre::{eyre, Result}; -use libp2p::PeerId; -use rand::Rng; -use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_networking::{GetRecordError, NetworkError}; -use sn_protocol::{ - error::Error as ProtocolError, - storage::{ChunkAddress, RegisterAddress}, - NetworkAddress, -}; -use sn_registers::Permissions; -use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; -use std::collections::BTreeMap; -use tokio::time::{sleep, Duration}; -use tracing::info; -use xor_name::XorName; - -#[tokio::test] -async fn storage_payment_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - let balance_before = paying_wallet.balance(); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - // generate a random number (between 50 and 100) of random addresses - let mut rng = rand::thread_rng(); - let random_content_addrs = (0..rng.gen_range(50..100)) - .map(|_| { - sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) - }) - .collect::>(); - info!( - "Paying for {} random addresses...", - random_content_addrs.len() - ); - - let _cost = wallet_client - 
.pay_for_storage(random_content_addrs.clone().into_iter()) - .await?; - - info!("Verifying balance has been paid from the wallet..."); - - let paying_wallet = wallet_client.into_wallet(); - assert!( - paying_wallet.balance() < balance_before, - "balance should have decreased after payment" - ); - - Ok(()) -} - -#[tokio::test] -async fn storage_payment_fails_with_insufficient_money() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir: TempDir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - let (files_api, content_bytes, _random_content_addrs, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let subset_len = chunks.len() / 3; - let _storage_cost = wallet_client - .pay_for_storage( - chunks - .clone() - .into_iter() - .take(subset_len) - .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), - ) - .await?; - - // now let's request to upload all addresses, even that we've already paid for a subset of them - let verify_store = false; - let res = files_api - .upload_test_bytes(content_bytes.clone(), verify_store) - .await; - assert!( - res.is_err(), - "Should have failed to store as we didnt pay for everything" - ); - Ok(()) -} - -// TODO: reenable -#[ignore = "Currently we do not cache the proofs in the wallet"] -#[tokio::test] -async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir: TempDir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let wallet_original_balance = paying_wallet.balance().as_nano(); - let mut wallet_client = 
WalletClient::new(client.clone(), paying_wallet); - - // generate a random number (between 50 and 100) of random addresses - let mut rng = rand::thread_rng(); - let random_content_addrs = (0..rng.gen_range(50..100)) - .map(|_| { - sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) - }) - .collect::>(); - - // let's first pay only for a subset of the addresses - let subset_len = random_content_addrs.len() / 3; - info!("Paying for {subset_len} random addresses...",); - let storage_payment_result = wallet_client - .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len)) - .await?; - - let total_cost = storage_payment_result - .storage_cost - .checked_add(storage_payment_result.royalty_fees) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?; - - // check we've paid only for the subset of addresses, 1 nano per addr - let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); - info!("Verifying new balance on paying wallet is {new_balance} ..."); - let paying_wallet = wallet_client.into_wallet(); - assert_eq!(paying_wallet.balance(), new_balance); - - // let's verify payment proofs for the subset have been cached in the wallet - assert!(random_content_addrs - .iter() - .take(subset_len) - .all(|name| paying_wallet - .api() - .get_recent_payment(&name.as_xorname().unwrap()) - .is_ok())); - - // now let's request to pay for all addresses, even that we've already paid for a subset of them - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let storage_payment_result = wallet_client - .pay_for_storage(random_content_addrs.clone().into_iter()) - .await?; - let total_cost = storage_payment_result - .storage_cost - .checked_add(storage_payment_result.royalty_fees) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?; - - // check we've paid only for addresses we haven't previously paid for, 1 nano per addr - let new_balance = 
NanoTokens::from( - wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), - ); - println!("Verifying new balance on paying wallet is now {new_balance} ..."); - let paying_wallet = wallet_client.into_wallet(); - assert_eq!(paying_wallet.balance(), new_balance); - - // let's verify payment proofs now for all addresses have been cached in the wallet - // assert!(random_content_addrs - // .iter() - // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name))); - - Ok(()) -} - -#[tokio::test] -async fn storage_payment_chunk_upload_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let (files_api, _content_bytes, file_addr, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - info!("Paying for {} random addresses...", chunks.len()); - - let _cost = wallet_client - .pay_for_storage( - chunks - .iter() - .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), - ) - .await?; - - let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf()); - uploader.set_show_holders(true); - uploader.insert_chunk_paths(chunks); - let _upload_stats = uploader.start_upload().await?; - - let mut files_download = FilesDownload::new(files_api); - let _ = files_download.download_file(file_addr, None).await?; - - Ok(()) -} - -#[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] -#[tokio::test] -async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; 
- let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let (files_api, content_bytes, content_addr, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - let mut no_data_payments = BTreeMap::default(); - for (chunk_name, _) in chunks.iter() { - no_data_payments.insert( - *chunk_name, - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), - PeerId::random().to_bytes(), - ), - ); - } - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - sleep(Duration::from_secs(5)).await; - - files_api - .upload_test_bytes(content_bytes.clone(), false) - .await?; +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use crate::common::{client::get_client_and_funded_wallet, random_content}; +// use assert_fs::TempDir; +// use eyre::{eyre, Result}; +// use libp2p::PeerId; +// use rand::Rng; +// use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; +// use sn_evm::{Amount, AttoTokens, PaymentQuote}; +// use sn_logging::LogBuilder; +// use sn_networking::{GetRecordError, NetworkError}; +// use sn_protocol::{ +// error::Error as ProtocolError, +// storage::{ChunkAddress, RegisterAddress}, +// NetworkAddress, +// }; +// use sn_registers::Permissions; +// use std::collections::BTreeMap; +// use tokio::time::{sleep, Duration}; +// use tracing::info; +// use xor_name::XorName; + +// #[tokio::test] +// async fn storage_payment_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// let balance_before = paying_wallet.balance(); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// // generate a random number (between 50 and 100) of random addresses +// let mut rng = rand::thread_rng(); +// let random_content_addrs = (0..rng.gen_range(50..100)) +// .map(|_| { +// sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) +// }) +// .collect::>(); +// info!( +// "Paying for {} random addresses...", +// random_content_addrs.len() +// ); + +// let _cost = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter()) +// .await?; + +// info!("Verifying balance has been paid from the wallet..."); + +// let paying_wallet = wallet_client.into_wallet(); +// assert!( +// paying_wallet.balance() < balance_before, +// "balance should have decreased after payment" +// ); + +// Ok(()) +// } + +// #[tokio::test] +// async fn 
storage_payment_fails_with_insufficient_money() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir: TempDir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// let (files_api, content_bytes, _random_content_addrs, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let subset_len = chunks.len() / 3; +// let _storage_cost = wallet_client +// .pay_for_storage( +// chunks +// .clone() +// .into_iter() +// .take(subset_len) +// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), +// ) +// .await?; + +// // now let's request to upload all addresses, even that we've already paid for a subset of them +// let verify_store = false; +// let res = files_api +// .upload_test_bytes(content_bytes.clone(), verify_store) +// .await; +// assert!( +// res.is_err(), +// "Should have failed to store as we didnt pay for everything" +// ); +// Ok(()) +// } + +// // TODO: reenable +// #[ignore = "Currently we do not cache the proofs in the wallet"] +// #[tokio::test] +// async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir: TempDir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let wallet_original_balance = paying_wallet.balance().as_atto(); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// // generate a random number (between 50 and 100) of random addresses +// let mut rng = rand::thread_rng(); +// let random_content_addrs = (0..rng.gen_range(50..100)) +// .map(|_| { +// 
sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) +// }) +// .collect::>(); + +// // let's first pay only for a subset of the addresses +// let subset_len = random_content_addrs.len() / 3; +// info!("Paying for {subset_len} random addresses...",); +// let storage_payment_result = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len)) +// .await?; + +// let total_cost = storage_payment_result +// .storage_cost +// .checked_add(storage_payment_result.royalty_fees) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?; + +// // check we've paid only for the subset of addresses, 1 nano per addr +// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); +// info!("Verifying new balance on paying wallet is {new_balance} ..."); +// let paying_wallet = wallet_client.into_wallet(); +// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm + +// // let's verify payment proofs for the subset have been cached in the wallet +// assert!(random_content_addrs +// .iter() +// .take(subset_len) +// .all(|name| paying_wallet +// .api() +// .get_recent_payment(&name.as_xorname().unwrap()) +// .is_ok())); + +// // now let's request to pay for all addresses, even that we've already paid for a subset of them +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let storage_payment_result = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter()) +// .await?; +// let total_cost = storage_payment_result +// .storage_cost +// .checked_add(storage_payment_result.royalty_fees) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?; + +// // check we've paid only for addresses we haven't previously paid for, 1 nano per addr +// let new_balance = AttoTokens::from_atto( +// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), +// ); +// 
println!("Verifying new balance on paying wallet is now {new_balance} ..."); +// let paying_wallet = wallet_client.into_wallet(); +// // TODO adapt to evm +// // assert_eq!(paying_wallet.balance(), new_balance); + +// // let's verify payment proofs now for all addresses have been cached in the wallet +// // assert!(random_content_addrs +// // .iter() +// // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name))); + +// Ok(()) +// } + +// #[tokio::test] +// async fn storage_payment_chunk_upload_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let (files_api, _content_bytes, file_addr, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// info!("Paying for {} random addresses...", chunks.len()); + +// let _cost = wallet_client +// .pay_for_storage( +// chunks +// .iter() +// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), +// ) +// .await?; + +// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf()); +// uploader.set_show_holders(true); +// uploader.insert_chunk_paths(chunks); +// let _upload_stats = uploader.start_upload().await?; + +// let mut files_download = FilesDownload::new(files_api); +// let _ = files_download.download_file(file_addr, None).await?; + +// Ok(()) +// } + +// #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] +// #[tokio::test] +// async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; 
+// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let (files_api, content_bytes, content_addr, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// let mut no_data_payments = BTreeMap::default(); +// for (chunk_name, _) in chunks.iter() { +// no_data_payments.insert( +// *chunk_name, +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// PeerId::random().to_bytes(), +// ), +// ); +// } + +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; + +// sleep(Duration::from_secs(5)).await; + +// files_api +// .upload_test_bytes(content_bytes.clone(), false) +// .await?; - info!("Reading {content_addr:?} expected to fail"); - let mut files_download = FilesDownload::new(files_api); - assert!( - matches!( - files_download.download_file(content_addr, None).await, - Err(ClientError::Network(NetworkError::GetRecordError( - GetRecordError::RecordNotFound - ))) - ), - "read bytes should fail as we didn't store them" - ); +// info!("Reading {content_addr:?} expected to fail"); +// let mut files_download = FilesDownload::new(files_api); +// assert!( +// matches!( +// files_download.download_file(content_addr, None).await, +// Err(ClientError::Network(NetworkError::GetRecordError( +// GetRecordError::RecordNotFound +// ))) +// ), +// "read bytes should fail as we didn't store them" +// ); - Ok(()) -} +// Ok(()) +// } -#[tokio::test] -async fn storage_payment_register_creation_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// #[tokio::test] +// async fn storage_payment_register_creation_succeeds() -> Result<()> { +// let _log_guards = 
LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - let paying_wallet_dir = TempDir::new()?; +// let paying_wallet_dir = TempDir::new()?; - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_addr = NetworkAddress::from_register_address(address); - info!("Paying for random Register address {net_addr:?} ..."); +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_addr = NetworkAddress::from_register_address(address); +// info!("Paying for random Register address {net_addr:?} ..."); - let _cost = wallet_client - .pay_for_storage(std::iter::once(net_addr)) - .await?; +// let _cost = wallet_client +// .pay_for_storage(std::iter::once(net_addr)) +// .await?; - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) - .await?; +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// .await?; + +// println!("Newly created register has {} ops", register.read().len()); + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(register.read(), retrieved_reg.read()); + +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; + +// println!( +// "Register has {} ops after first write", +// register.read().len() +// ); + +// register.sync(&mut wallet_client, true, 
None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); + +// assert_eq!(retrieved_reg.read().len(), 1); + +// for index in 1..10 { +// println!("current index is {index}"); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; +// register.sync(&mut wallet_client, true, None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); + +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops.len() +// ); +// println!("current local cached ops length is {}", register.ops.len()); + +// assert_eq!(retrieved_reg.read().len(), register.read().len()); + +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); + +// println!("Current fetched register is {:?}", retrieved_reg.register); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.register.log_update_history() +// ); + +// std::thread::sleep(std::time::Duration::from_millis(1000)); +// } + +// Ok(()) +// } + +// #[tokio::test] +// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... 
need to check if this test is valid"] +// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_address = +// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - println!("Newly created register has {} ops", register.read().len()); - - let retrieved_reg = client.get_register(address).await?; - - assert_eq!(register.read(), retrieved_reg.read()); - - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - - println!( - "Register has {} ops after first write", - register.read().len() - ); - - register.sync(&mut wallet_client, true, None).await?; - - let retrieved_reg = client.get_register(address).await?; - - assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); - - assert_eq!(retrieved_reg.read().len(), 1); - - for index in 1..10 { - println!("current index is {index}"); - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - register.sync(&mut wallet_client, true, None).await?; - - let retrieved_reg = client.get_register(address).await?; - - println!( - "current retrieved register entry length is {}", - retrieved_reg.read().len() - ); - println!("current expected entry length is {}", register.read().len()); - - println!( - "current retrieved register ops length is {}", - retrieved_reg.ops.len() - ); - println!("current local cached ops length is {}", register.ops.len()); - - assert_eq!(retrieved_reg.read().len(), register.read().len()); - - 
assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); - - println!("Current fetched register is {:?}", retrieved_reg.register); - println!( - "Fetched register has update history of {}", - retrieved_reg.register.log_update_history() - ); - - std::thread::sleep(std::time::Duration::from_millis(1000)); - } - - Ok(()) -} - -#[tokio::test] -#[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] -async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_address = - NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - - let mut no_data_payments = BTreeMap::default(); - no_data_payments.insert( - net_address - .as_xorname() - .expect("RegisterAddress should convert to XorName"), - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(xor_name, NanoTokens::from(0)), - vec![], - ), - ); - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - // this should fail to store as the amount paid is not enough - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) - .await?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - client.get_register(address).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - let random_entry = rng.gen::<[u8; 
32]>().to_vec(); - register.write(&random_entry)?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - register.sync(&mut wallet_client, false, None).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - Ok(()) -} +// let mut no_data_payments = BTreeMap::default(); +// no_data_payments.insert( +// net_address +// .as_xorname() +// .expect("RegisterAddress should convert to XorName"), +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// vec![], +// ), +// ); + +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); + +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops_list().len() +// ); +// println!( +// "current local cached ops length is {}", +// register.ops_list().len() +// ); + +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; + +// // this should fail to store as the amount paid is not enough +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// .await?; + +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// client.get_register(address).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); + +// println!("Current fetched register is {:?}", retrieved_reg.address()); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.log_update_history() +// ); + +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// register.write(&random_entry)?; + +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// register.sync(&mut wallet_client, false, None).await, +// 
Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); + +// Ok(()) +// } diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 3abf477b18..641756fa2c 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -9,37 +9,31 @@ #![allow(clippy::mutable_key_type)] mod common; -use crate::common::{ +use autonomi::Client; +use bytes::Bytes; +use common::{ client::{get_all_rpc_addresses, get_client_and_funded_wallet}, get_all_peer_ids, get_safenode_rpc_client, NodeRestart, }; -use assert_fs::TempDir; -use common::client::get_wallet; use eyre::{eyre, Result}; use libp2p::{ kad::{KBucketKey, RecordKey}, PeerId, }; use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, FilesApi, Uploader, WalletClient}; use sn_logging::LogBuilder; -use sn_networking::sort_peers_by_key; +use sn_networking::{sleep, sort_peers_by_key}; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_registers::{Permissions, RegisterAddress}; use std::{ collections::{BTreeSet, HashMap, HashSet}, - fs::File, - io::Write, net::SocketAddr, - path::PathBuf, time::{Duration, Instant}, }; use tonic::Request; use tracing::{debug, error, info}; -use xor_name::XorName; const CHUNK_SIZE: usize = 1024; @@ -103,16 +97,10 @@ async fn verify_data_location() -> Result<()> { let node_rpc_address = get_all_rpc_addresses(true)?; let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; - // Store chunks - println!("Creating a client and paying wallet..."); - debug!("Creating a client and paying wallet..."); + let (client, wallet) = get_client_and_funded_wallet().await; - let paying_wallet_dir = TempDir::new()?; - - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; - 
store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; + store_chunks(&client, chunk_count, &wallet).await?; + store_registers(&client, register_count, &wallet).await?; // Verify data location initially verify_location(&all_peers, &node_rpc_address).await?; @@ -325,7 +313,11 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd } // Generate random Chunks and store them to the Network -async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { +async fn store_chunks( + client: &Client, + chunk_count: usize, + wallet: &evmlib::wallet::Wallet, +) -> Result<()> { let start = Instant::now(); let mut rng = OsRng; @@ -335,39 +327,19 @@ async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) - break; } - let chunks_dir = TempDir::new()?; - let random_bytes: Vec = ::std::iter::repeat(()) .map(|()| rng.gen::()) .take(CHUNK_SIZE) .collect(); - let file_path = chunks_dir.join("random_content"); - let mut output_file = File::create(file_path.clone())?; - output_file.write_all(&random_bytes)?; - - let (head_chunk_addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; - - debug!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", - chunks.len(), - random_bytes.len() - ); - - let key = - PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); + let random_bytes = Bytes::from(random_bytes); - let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.set_verify_store(false); - uploader.insert_chunk_paths(chunks); - let _upload_stats = uploader.start_upload().await?; + client.data_put(random_bytes, wallet).await?; uploaded_chunks_count += 1; - println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); - info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); + println!("Stored Chunk with len {CHUNK_SIZE}"); + 
info!("Stored Chunk with len {CHUNK_SIZE}"); } println!( @@ -385,30 +357,42 @@ async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) - Ok(()) } -async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { +async fn store_registers( + client: &Client, + register_count: usize, + wallet: &evmlib::wallet::Wallet, +) -> Result<()> { let start = Instant::now(); - let paying_wallet = get_wallet(&wallet_dir); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); let mut uploaded_registers_count = 0; loop { if uploaded_registers_count >= register_count { break; } - let meta = XorName(rand::random()); - let owner = client.signer_pk(); + // Owner key of the register. + let key = bls::SecretKey::random(); + + // Create a register with the value [1, 2, 3, 4] + let rand_name: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(10) + .map(char::from) + .collect(); + let register = client + .register_create(vec![1, 2, 3, 4].into(), &rand_name, key.clone(), wallet) + .await?; - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?}"); - debug!("Creating Register at {addr:?}"); + println!("Created Register at {:?}", register.address()); + debug!("Created Register at {:?}", register.address()); + sleep(Duration::from_secs(5)).await; - let (mut register, ..) 
= client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) + // Update the register with the value [5, 6, 7, 8] + client + .register_update(register.clone(), vec![5, 6, 7, 8].into(), key) .await?; - println!("Editing Register at {addr:?}"); - debug!("Editing Register at {addr:?}"); - register.write_online("entry".as_bytes(), true).await?; + println!("Updated Register at {:?}", register.address()); + debug!("Updated Register at {:?}", register.address()); uploaded_registers_count += 1; } @@ -422,6 +406,6 @@ async fn store_registers(client: Client, register_count: usize, wallet_dir: Path ); // to make sure the last register was stored - tokio::time::sleep(Duration::from_secs(10)).await; + sleep(Duration::from_secs(10)).await; Ok(()) } diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 2070025fa3..4b152994c4 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.6" +version = "0.11.0" [[bin]] name = "safenode-manager" @@ -20,14 +20,16 @@ path = "src/bin/daemon/main.rs" [features] chaos = [] default = ["quic"] -local-discovery = [] +local = [] network-contacts = [] +nightly = [] open-metrics = [] otlp = [] quic = [] statemap = [] tcp = [] websockets = [] +faucet = [] [dependencies] chrono = "~0.4.19" @@ -36,7 +38,7 @@ colored = "2.0.4" color-eyre = "~0.6" dirs-next = "2.0.0" indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.53", features = [] } +libp2p = { version = "0.54.1", features = [] } libp2p-identity = { version = "0.2.7", features = ["rand"] } prost = { version = "0.9" } rand = "0.8.5" @@ -44,24 +46,28 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = 
{ path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } tracing = { version = "~0.1.26" } tonic = { version = "0.6.2" } uuid = { version = "1.5.0", features = ["v4"] } -which = "6.0.1" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dependencies] nix = { version = "0.27.1", features = ["fs", "user"] } users = "0.11" +[target.'cfg(target_os = "windows")'.dependencies] +which = "6.0.1" + [dev-dependencies] assert_cmd = "2.0.12" assert_fs = "1.0.13" diff --git a/sn_node_manager/src/add_services/config.rs b/sn_node_manager/src/add_services/config.rs index 2d5cac69dc..1910428380 100644 --- a/sn_node_manager/src/add_services/config.rs +++ b/sn_node_manager/src/add_services/config.rs @@ -9,6 +9,7 @@ use color_eyre::{eyre::eyre, Result}; use libp2p::Multiaddr; use service_manager::{ServiceInstallCtx, ServiceLabel}; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use std::{ ffi::OsString, @@ -72,6 +73,7 @@ pub struct InstallNodeServiceCtxBuilder { pub bootstrap_peers: Vec, pub data_dir_path: PathBuf, pub env_variables: Option>, + pub evm_network: EvmNetwork, pub genesis: bool, pub home_network: bool, pub local: bool, @@ -84,6 +86,7 @@ pub struct 
InstallNodeServiceCtxBuilder { pub node_ip: Option, pub node_port: Option, pub owner: Option, + pub rewards_address: RewardsAddress, pub rpc_socket_addr: SocketAddr, pub safenode_path: PathBuf, pub service_user: Option, @@ -154,6 +157,23 @@ impl InstallNodeServiceCtxBuilder { args.push(OsString::from(peers_str)); } + args.push(OsString::from("--rewards-address")); + args.push(OsString::from(self.rewards_address.to_string())); + + args.push(OsString::from(self.evm_network.to_string())); + if let EvmNetwork::Custom(custom_network) = &self.evm_network { + args.push(OsString::from("--rpc-url")); + args.push(OsString::from(custom_network.rpc_url_http.to_string())); + args.push(OsString::from("--payment-token-address")); + args.push(OsString::from( + custom_network.payment_token_address.to_string(), + )); + args.push(OsString::from("--data-payments-address")); + args.push(OsString::from( + custom_network.data_payments_address.to_string(), + )); + } + Ok(ServiceInstallCtx { args, autostart: self.autostart, @@ -175,6 +195,7 @@ pub struct AddNodeServiceOptions { pub delete_safenode_src: bool, pub enable_metrics_server: bool, pub env_variables: Option>, + pub evm_network: EvmNetwork, pub genesis: bool, pub home_network: bool, pub local: bool, @@ -185,6 +206,7 @@ pub struct AddNodeServiceOptions { pub node_ip: Option, pub node_port: Option, pub owner: Option, + pub rewards_address: RewardsAddress, pub rpc_address: Option, pub rpc_port: Option, pub safenode_src_path: PathBuf, @@ -319,3 +341,271 @@ pub struct AddDaemonServiceOptions { pub user: String, pub version: String, } + +#[cfg(test)] +mod tests { + use super::*; + use sn_evm::{CustomNetwork, RewardsAddress}; + use std::net::{IpAddr, Ipv4Addr}; + + fn create_default_builder() -> InstallNodeServiceCtxBuilder { + InstallNodeServiceCtxBuilder { + autostart: true, + bootstrap_peers: vec![], + data_dir_path: PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::ArbitrumOne, + genesis: false, + home_network: 
false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + fn create_custom_evm_network_builder() -> InstallNodeServiceCtxBuilder { + InstallNodeServiceCtxBuilder { + autostart: true, + bootstrap_peers: vec![], + data_dir_path: PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse().unwrap(), + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + ) + .unwrap(), + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ) + .unwrap(), + }), + genesis: false, + home_network: false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + fn create_builder_with_all_options_enabled() -> InstallNodeServiceCtxBuilder { + InstallNodeServiceCtxBuilder { + autostart: true, + bootstrap_peers: vec![], + data_dir_path: PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse().unwrap(), + 
payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + ) + .unwrap(), + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ) + .unwrap(), + }), + genesis: false, + home_network: false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: Some(10), + max_log_files: Some(10), + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + #[test] + fn build_should_assign_expected_values_when_mandatory_options_are_provided() { + let builder = create_default_builder(); + let result = builder.build().unwrap(); + + assert_eq!(result.label.to_string(), "test-node"); + assert_eq!(result.program, PathBuf::from("/bin/safenode")); + assert!(result.autostart); + assert_eq!(result.username, None); + assert_eq!(result.working_directory, None); + + let expected_args = vec![ + "--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-arbitrum-one", + ]; + assert_eq!( + result + .args + .iter() + .map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + } + + #[test] + fn build_should_assign_expected_values_when_a_custom_evm_network_is_provided() { + let builder = create_custom_evm_network_builder(); + let result = builder.build().unwrap(); + + assert_eq!(result.label.to_string(), "test-node"); + assert_eq!(result.program, PathBuf::from("/bin/safenode")); + assert!(result.autostart); + assert_eq!(result.username, None); + assert_eq!(result.working_directory, None); + + let expected_args = vec![ + 
"--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-custom", + "--rpc-url", + "http://localhost:8545/", + "--payment-token-address", + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "--data-payments-address", + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ]; + assert_eq!( + result + .args + .iter() + .map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + } + + #[test] + fn build_should_assign_expected_values_when_all_options_are_enabled() { + let mut builder = create_builder_with_all_options_enabled(); + builder.genesis = true; + builder.home_network = true; + builder.local = true; + builder.log_format = Some(LogFormat::Json); + builder.upnp = true; + builder.node_ip = Some(Ipv4Addr::new(192, 168, 1, 1)); + builder.node_port = Some(12345); + builder.metrics_port = Some(9090); + builder.owner = Some("test-owner".to_string()); + builder.bootstrap_peers = vec![ + "/ip4/127.0.0.1/tcp/8080".parse().unwrap(), + "/ip4/192.168.1.1/tcp/8081".parse().unwrap(), + ]; + builder.service_user = Some("safenode-user".to_string()); + + let result = builder.build().unwrap(); + + let expected_args = vec![ + "--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--first", + "--home-network", + "--local", + "--log-format", + "json", + "--upnp", + "--ip", + "192.168.1.1", + "--port", + "12345", + "--metrics-server-port", + "9090", + "--owner", + "test-owner", + "--max-archived-log-files", + "10", + "--max-log-files", + "10", + "--peer", + "/ip4/127.0.0.1/tcp/8080,/ip4/192.168.1.1/tcp/8081", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-custom", + "--rpc-url", + "http://localhost:8545/", + "--payment-token-address", + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "--data-payments-address", + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ]; + assert_eq!( + result + .args + .iter() + 
.map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + assert_eq!(result.username, Some("safenode-user".to_string())); + } + + #[test] + fn build_should_assign_expected_values_when_environment_variables_are_provided() { + let mut builder = create_default_builder(); + builder.env_variables = Some(vec![ + ("VAR1".to_string(), "value1".to_string()), + ("VAR2".to_string(), "value2".to_string()), + ]); + + let result = builder.build().unwrap(); + + assert_eq!( + result.environment, + Some(vec![ + ("VAR1".to_string(), "value1".to_string()), + ("VAR2".to_string(), "value2".to_string()), + ]) + ); + } +} diff --git a/sn_node_manager/src/add_services/mod.rs b/sn_node_manager/src/add_services/mod.rs index 86137d881d..96c6cf37a7 100644 --- a/sn_node_manager/src/add_services/mod.rs +++ b/sn_node_manager/src/add_services/mod.rs @@ -222,6 +222,7 @@ pub async fn add_node( bootstrap_peers: options.bootstrap_peers.clone(), data_dir_path: service_data_dir_path.clone(), env_variables: options.env_variables.clone(), + evm_network: options.evm_network.clone(), genesis: options.genesis, home_network: options.home_network, local: options.local, @@ -234,6 +235,7 @@ pub async fn add_node( node_ip: options.node_ip, node_port, owner: owner.clone(), + rewards_address: options.rewards_address, rpc_socket_addr, safenode_path: service_safenode_path.clone(), service_user: options.user.clone(), @@ -256,6 +258,7 @@ pub async fn add_node( auto_restart: options.auto_restart, connected_peers: None, data_dir_path: service_data_dir_path.clone(), + evm_network: options.evm_network.clone(), genesis: options.genesis, home_network: options.home_network, listen_addr: None, @@ -268,6 +271,7 @@ pub async fn add_node( node_ip: options.node_ip, node_port, number: node_number, + rewards_address: options.rewards_address, reward_balance: None, rpc_socket_addr, owner: owner.clone(), diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index 
f53d37f0fc..9833570929 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -23,12 +23,12 @@ use libp2p::Multiaddr; use mockall::{mock, predicate::*, Sequence}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; +use sn_evm::{AttoTokens, CustomNetwork, EvmNetwork, RewardsAddress}; use sn_service_management::{auditor::AuditorServiceData, control::ServiceControl}; use sn_service_management::{error::Result as ServiceControlResult, NatDetectionStatus}; use sn_service_management::{ DaemonServiceData, FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -115,6 +115,15 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, local: true, @@ -127,6 +136,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -172,6 +182,18 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: 
RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -203,6 +225,22 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res node_data_dir.to_path_buf().join("safenode1") ); assert_matches!(node_registry.nodes[0].status, ServiceStatus::Added); + assert_eq!( + node_registry.nodes[0].evm_network, + EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3" + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC" + )?, + }) + ); + assert_eq!( + node_registry.nodes[0].rewards_address, + RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")? 
+ ); Ok(()) } @@ -225,6 +263,15 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, listen_addr: None, @@ -240,7 +287,10 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n pid: None, peer_id: None, owner: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), status: ServiceStatus::Added, safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -294,6 +344,18 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -365,6 +427,18 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: 
EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -421,6 +495,15 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -438,6 +521,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( .to_path_buf() .join("safenode1") .join(SAFENODE_FILE_NAME), + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, service_user: Some(get_username()), upnp: false, } @@ -461,6 +545,15 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode2"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -473,6 +566,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> 
Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), safenode_path: node_data_dir .to_path_buf() @@ -501,6 +595,15 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( data_dir_path: node_data_dir.to_path_buf().join("safenode3"), bootstrap_peers: vec![], env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -513,6 +616,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8085), safenode_path: node_data_dir .to_path_buf() @@ -559,6 +663,18 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -664,6 +780,15 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re bootstrap_peers: new_peers.clone(), 
data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -676,6 +801,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -685,6 +811,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re upnp: false, } .build()?; + mock_service_control .expect_install() .times(1) @@ -713,14 +840,26 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re node_port: None, rpc_address: None, rpc_port: None, - safenode_dir_path: temp_dir.to_path_buf(), safenode_src_path: safenode_download_path.to_path_buf(), + safenode_dir_path: temp_dir.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -800,6 +939,15 @@ async fn 
add_node_should_update_the_environment_variables_inside_node_registry() bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: env_variables.clone(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -812,6 +960,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -857,6 +1006,18 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -909,6 +1070,15 @@ async fn add_new_node_should_add_another_service() -> Result<()> { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + 
data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, listen_addr: None, @@ -924,7 +1094,10 @@ async fn add_new_node_should_add_another_service() -> Result<()> { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -957,6 +1130,15 @@ async fn add_new_node_should_add_another_service() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode2"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -968,6 +1150,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { name: "safenode2".to_string(), node_ip: None, node_port: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), owner: None, safenode_path: node_data_dir @@ -1015,6 +1198,18 @@ async fn add_new_node_should_add_another_service() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + 
data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1107,6 +1302,15 @@ async fn add_node_should_use_custom_ip() -> Result<()> { ), OsString::from("--ip"), OsString::from(custom_ip.to_string()), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1153,6 +1357,18 @@ async fn add_node_should_use_custom_ip() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1210,6 +1426,15 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", 
+ )?, + }), genesis: false, home_network: false, local: false, @@ -1222,6 +1447,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { node_ip: None, node_port: Some(custom_port), owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -1268,6 +1494,18 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1345,6 +1583,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12000"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1394,6 +1641,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), 
+ OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1443,6 +1699,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12002"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1489,6 +1754,18 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1521,6 +1798,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: 
RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1536,7 +1822,10 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1588,6 +1877,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1618,6 +1919,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1633,7 +1943,10 @@ async fn 
add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1685,6 +1998,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1746,14 +2071,26 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - node_port: Some(PortRange::Range(12000, 12002)), rpc_address: None, rpc_port: None, - safenode_dir_path: temp_dir.to_path_buf(), safenode_src_path: safenode_download_path.to_path_buf(), + safenode_dir_path: temp_dir.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, 
+ }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1828,6 +2165,18 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1909,6 +2258,15 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> ), OsString::from("--metrics-server-port"), OsString::from("15001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1955,6 +2313,18 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + 
"0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2028,6 +2398,15 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { ), OsString::from("--max-archived-log-files"), OsString::from("20"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2074,6 +2453,18 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2148,6 +2539,15 @@ async fn add_node_should_set_max_log_files() -> Result<()> { ), OsString::from("--max-log-files"), OsString::from("20"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2194,6 +2594,18 @@ async 
fn add_node_should_set_max_log_files() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2266,6 +2678,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2315,6 +2736,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), OsString::from("--metrics-server-port"), OsString::from("12001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2364,6 +2794,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), 
OsString::from("--metrics-server-port"), OsString::from("12002"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2410,6 +2849,18 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2439,6 +2890,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2454,7 +2914,10 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: 
RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2506,6 +2969,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2537,6 +3012,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2552,7 +3036,10 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: 
PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2604,6 +3091,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2673,6 +3172,15 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2715,6 +3223,15 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2757,6 +3274,15 @@ async 
fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2803,6 +3329,18 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2843,6 +3381,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2858,7 +3405,10 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() owner: None, peer_id: None, pid: None, - reward_balance: 
Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2910,6 +3460,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2941,6 +3503,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2956,7 +3527,10 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -3008,6 +3582,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -3063,6 +3649,15 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -3075,6 +3670,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3120,6 +3716,18 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: 
EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -3172,6 +3780,15 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -3184,6 +3801,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3229,6 +3847,18 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut 
node_registry, &mock_service_control, @@ -3281,6 +3911,15 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -3293,6 +3932,7 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3338,6 +3978,18 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -3415,6 +4067,18 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + 
"0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4008,6 +4672,15 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -4020,6 +4693,7 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4066,6 +4740,18 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4122,6 +4808,15 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> 
Result<( genesis: false, home_network: true, local: false, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, max_archived_log_files: None, @@ -4131,6 +4826,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4177,6 +4873,18 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4230,6 +4938,15 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + 
"0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -4242,6 +4959,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4288,6 +5006,18 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { user: Some(get_username()), user_mode: true, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4338,6 +5068,15 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -4350,6 +5089,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: 
node_data_dir .to_path_buf() @@ -4396,6 +5136,18 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { user: Some(get_username()), user_mode: true, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4467,6 +5219,15 @@ async fn add_node_should_assign_an_owner_in_lowercase() -> Result<()> { ), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -4514,6 +5275,18 @@ async fn add_node_should_assign_an_owner_in_lowercase() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4587,6 +5360,15 @@ async fn add_node_should_auto_restart() -> 
Result<()> { ), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: true, contents: None, @@ -4634,6 +5416,18 @@ async fn add_node_should_auto_restart() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 3ab3b7dcea..9269f76889 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -6,9 +6,13 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+mod subcommands; + +use crate::subcommands::evm_network::EvmNetworkCommand; use clap::{Parser, Subcommand}; use color_eyre::{eyre::eyre, Result}; use libp2p::Multiaddr; +use sn_evm::RewardsAddress; use sn_logging::{LogBuilder, LogFormat}; use sn_node_manager::{ add_services::config::PortRange, @@ -22,22 +26,35 @@ use tracing::Level; const DEFAULT_NODE_COUNT: u16 = 25; #[derive(Parser)] -#[command(author, version, about, long_about = None)] +#[command(disable_version_flag = true)] pub(crate) struct Cmd { /// Available sub commands. #[clap(subcommand)] - pub cmd: SubCmd, + pub cmd: Option, - #[clap(short, long, action = clap::ArgAction::Count, default_value_t = 2)] - verbose: u8, + /// Print the crate version. + #[clap(long)] + pub crate_version: bool, /// Output debug-level logging to stderr. #[clap(long, conflicts_with = "trace")] debug: bool, + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + pub package_version: bool, + /// Output trace-level logging to stderr. #[clap(long, conflicts_with = "debug")] trace: bool, + + #[clap(short, long, action = clap::ArgAction::Count, default_value_t = 2)] + verbose: u8, + + /// Print version information. + #[clap(long)] + version: bool, } #[derive(Subcommand, Debug)] @@ -106,6 +123,9 @@ pub enum SubCmd { /// Example: --env SN_LOG=all,RUST_LOG=libp2p=debug #[clap(name = "env", long, use_value_delimiter = true, value_parser = parse_environment_variables)] env_variables: Option>, + /// Specify what EVM network to use for payments. + #[command(subcommand)] + evm_network: EvmNetworkCommand, /// Set this flag to use the safenode '--home-network' feature. /// /// This enables the use of safenode services from a home network with a router. @@ -171,13 +191,6 @@ pub enum SubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. 
#[clap(long, value_parser = PortRange::parse)] node_port: Option, - /// Provide a path for the safenode binary to be used by the service. - /// - /// Useful for creating the service using a custom built binary. - #[clap(long)] - path: Option, - #[command(flatten)] - peers: PeersArgs, /// Specify the owner for the node service. /// /// This is mainly used for the 'Beta Rewards' programme, for linking your Discord username @@ -187,6 +200,16 @@ pub enum SubCmd { /// run as normal. #[clap(long)] owner: Option, + /// Provide a path for the safenode binary to be used by the service. + /// + /// Useful for creating the service using a custom built binary. + #[clap(long)] + path: Option, + #[command(flatten)] + peers: PeersArgs, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, /// Specify an Ipv4Addr for the node's RPC server to run on. /// /// Useful if you want to expose the RPC server pubilcly. Ports are assigned automatically. @@ -823,7 +846,7 @@ pub enum LocalSubCmd { metrics_port: Option, /// Path to a safenode binary. /// - /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. + /// Make sure to enable the local feature flag on the safenode when compiling the binary. /// /// The path and version arguments are mutually exclusive. #[clap(long, conflicts_with = "node_version")] @@ -868,6 +891,12 @@ pub enum LocalSubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] rpc_port: Option, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, + /// Optionally specify what EVM network to use for payments. 
+ #[command(subcommand)] + evm_network: Option, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -941,7 +970,7 @@ pub enum LocalSubCmd { metrics_port: Option, /// Path to a safenode binary /// - /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. + /// Make sure to enable the local feature flag on the safenode when compiling the binary. /// /// The path and version arguments are mutually exclusive. #[clap(long, conflicts_with = "node_version", conflicts_with = "build")] @@ -985,6 +1014,12 @@ pub enum LocalSubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] rpc_port: Option, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, + /// Optionally specify what EVM network to use for payments. + #[command(subcommand)] + evm_network: Option, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -1008,6 +1043,26 @@ pub enum LocalSubCmd { async fn main() -> Result<()> { color_eyre::install()?; let args = Cmd::parse(); + + if args.version { + println!( + "{}", + sn_build_info::version_string("Autonomi Node Manager", env!("CARGO_PKG_VERSION"), None) + ); + return Ok(()); + } + + if args.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if args.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + let verbosity = VerbosityLevel::from(args.verbose); let _log_handle = if args.debug || args.trace { @@ -1026,13 +1081,14 @@ async fn main() -> Result<()> { tracing::info!("Executing cmd: {:?}", args.cmd); match args.cmd { - SubCmd::Add { + Some(SubCmd::Add { auto_restart, auto_set_nat_flags, count, data_dir_path, enable_metrics_server, env_variables, + evm_network, home_network, local, 
log_dir_path, @@ -1045,20 +1101,22 @@ async fn main() -> Result<()> { owner, path, peers, + rewards_address, rpc_address, rpc_port, url, upnp, user, version, - } => { - let _ = cmd::node::add( + }) => { + cmd::node::add( auto_restart, auto_set_nat_flags, count, data_dir_path, enable_metrics_server, env_variables, + Some(evm_network.try_into()?), home_network, local, log_dir_path, @@ -1070,6 +1128,7 @@ async fn main() -> Result<()> { node_port, owner, peers, + rewards_address, rpc_address, rpc_port, path, @@ -1082,7 +1141,7 @@ async fn main() -> Result<()> { .await?; Ok(()) } - SubCmd::Auditor(AuditorSubCmd::Add { + Some(SubCmd::Auditor(AuditorSubCmd::Add { beta_encryption_key, env_variables, log_dir_path, @@ -1090,7 +1149,7 @@ async fn main() -> Result<()> { peers, url, version, - }) => { + })) => { cmd::auditor::add( beta_encryption_key, env_variables, @@ -1103,32 +1162,32 @@ async fn main() -> Result<()> { ) .await } - SubCmd::Auditor(AuditorSubCmd::Start {}) => cmd::auditor::start(verbosity).await, - SubCmd::Auditor(AuditorSubCmd::Stop {}) => cmd::auditor::stop(verbosity).await, - SubCmd::Auditor(AuditorSubCmd::Upgrade { + Some(SubCmd::Auditor(AuditorSubCmd::Start {})) => cmd::auditor::start(verbosity).await, + Some(SubCmd::Auditor(AuditorSubCmd::Stop {})) => cmd::auditor::stop(verbosity).await, + Some(SubCmd::Auditor(AuditorSubCmd::Upgrade { do_not_start, force, env_variables, url, version, - }) => { + })) => { cmd::auditor::upgrade(do_not_start, force, env_variables, url, version, verbosity).await } - SubCmd::Balance { + Some(SubCmd::Balance { peer_id: peer_ids, service_name: service_names, - } => cmd::node::balance(peer_ids, service_names, verbosity).await, - SubCmd::Daemon(DaemonSubCmd::Add { + }) => cmd::node::balance(peer_ids, service_names, verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Add { address, env_variables, port, path, url, version, - }) => cmd::daemon::add(address, env_variables, port, path, url, version, verbosity).await, - 
SubCmd::Daemon(DaemonSubCmd::Start {}) => cmd::daemon::start(verbosity).await, - SubCmd::Daemon(DaemonSubCmd::Stop {}) => cmd::daemon::stop(verbosity).await, - SubCmd::Faucet(faucet_command) => match faucet_command { + })) => cmd::daemon::add(address, env_variables, port, path, url, version, verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Start {})) => cmd::daemon::start(verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Stop {})) => cmd::daemon::stop(verbosity).await, + Some(SubCmd::Faucet(faucet_command)) => match faucet_command { FaucetSubCmd::Add { env_variables, log_dir_path, @@ -1168,7 +1227,7 @@ async fn main() -> Result<()> { .await } }, - SubCmd::Local(local_command) => match local_command { + Some(SubCmd::Local(local_command)) => match local_command { LocalSubCmd::Join { build, count, @@ -1185,8 +1244,15 @@ async fn main() -> Result<()> { owner_prefix, peers, rpc_port, + rewards_address, + evm_network, skip_validation: _, } => { + let evm_network = if let Some(evm_network) = evm_network { + Some(evm_network.try_into()?) + } else { + None + }; cmd::local::join( build, count, @@ -1203,6 +1269,8 @@ async fn main() -> Result<()> { owner_prefix, peers, rpc_port, + rewards_address, + evm_network, true, verbosity, ) @@ -1225,8 +1293,15 @@ async fn main() -> Result<()> { owner, owner_prefix, rpc_port, + rewards_address, + evm_network, skip_validation: _, } => { + let evm_network = if let Some(evm_network) = evm_network { + Some(evm_network.try_into()?) 
+ } else { + None + }; cmd::local::run( build, clean, @@ -1243,6 +1318,8 @@ async fn main() -> Result<()> { owner, owner_prefix, rpc_port, + rewards_address, + evm_network, true, verbosity, ) @@ -1254,27 +1331,27 @@ async fn main() -> Result<()> { json, } => cmd::local::status(details, fail, json).await, }, - SubCmd::NatDetection(NatDetectionSubCmd::Run { + Some(SubCmd::NatDetection(NatDetectionSubCmd::Run { path, servers, url, version, - }) => { + })) => { cmd::nat_detection::run_nat_detection(servers, true, path, url, version, verbosity) .await } - SubCmd::Remove { + Some(SubCmd::Remove { keep_directories, peer_id: peer_ids, service_name: service_names, - } => cmd::node::remove(keep_directories, peer_ids, service_names, verbosity).await, - SubCmd::Reset { force } => cmd::node::reset(force, verbosity).await, - SubCmd::Start { + }) => cmd::node::remove(keep_directories, peer_ids, service_names, verbosity).await, + Some(SubCmd::Reset { force }) => cmd::node::reset(force, verbosity).await, + Some(SubCmd::Start { connection_timeout, interval, peer_id: peer_ids, service_name: service_names, - } => { + }) => { cmd::node::start( connection_timeout, interval, @@ -1284,16 +1361,16 @@ async fn main() -> Result<()> { ) .await } - SubCmd::Status { + Some(SubCmd::Status { details, fail, json, - } => cmd::node::status(details, fail, json).await, - SubCmd::Stop { + }) => cmd::node::status(details, fail, json).await, + Some(SubCmd::Stop { peer_id: peer_ids, service_name: service_names, - } => cmd::node::stop(peer_ids, service_names, verbosity).await, - SubCmd::Upgrade { + }) => cmd::node::stop(peer_ids, service_names, verbosity).await, + Some(SubCmd::Upgrade { connection_timeout, do_not_start, force, @@ -1304,7 +1381,7 @@ async fn main() -> Result<()> { env_variables: provided_env_variable, url, version, - } => { + }) => { cmd::node::upgrade( connection_timeout, do_not_start, @@ -1320,11 +1397,14 @@ async fn main() -> Result<()> { ) .await } + None => Ok(()), } } fn 
get_log_builder(level: Level) -> Result { let logging_targets = vec![ + ("evmlib".to_string(), level), + ("evm_testnet".to_string(), level), ("sn_peers_acquisition".to_string(), level), ("sn_node_manager".to_string(), level), ("safenode_manager".to_string(), level), diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs new file mode 100644 index 0000000000..1683e00e99 --- /dev/null +++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs @@ -0,0 +1,69 @@ +// Copyright (C) 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use clap::Subcommand; +use color_eyre::{eyre::Result, Section}; +use sn_evm::{utils::get_evm_network_from_env, EvmNetwork}; + +#[derive(Subcommand, Clone, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum EvmNetworkCommand { + /// Use the Arbitrum One network + EvmArbitrumOne, + + /// Use the Arbitrum Sepolia network + EvmArbitrumSepolia, + + /// Use a custom network + EvmCustom { + /// The RPC URL for the custom network + #[arg(long)] + rpc_url: String, + + /// The payment token contract address + #[arg(long, short)] + payment_token_address: String, + + /// The chunk payments contract address + #[arg(long, short)] + data_payments_address: String, + }, + + /// Use the local EVM testnet, loaded from a CSV file. 
+ EvmLocal, +} + +impl TryInto for EvmNetworkCommand { + type Error = color_eyre::eyre::Error; + + fn try_into(self) -> Result { + match self { + Self::EvmArbitrumOne => Ok(EvmNetwork::ArbitrumOne), + Self::EvmArbitrumSepolia => Ok(EvmNetwork::ArbitrumSepolia), + Self::EvmLocal => { + if !cfg!(feature = "local") { + return Err(color_eyre::eyre::eyre!( + "The 'local' feature flag is not enabled." + )) + .suggestion("Enable the 'local' feature flag to use the local EVM testnet."); + } + let network = get_evm_network_from_env()?; + Ok(network) + } + Self::EvmCustom { + rpc_url, + payment_token_address, + data_payments_address, + } => Ok(EvmNetwork::new_custom( + &rpc_url, + &payment_token_address, + &data_payments_address, + )), + } + } +} diff --git a/sn_node_manager/src/bin/cli/subcommands/mod.rs b/sn_node_manager/src/bin/cli/subcommands/mod.rs new file mode 100644 index 0000000000..7bc6eae583 --- /dev/null +++ b/sn_node_manager/src/bin/cli/subcommands/mod.rs @@ -0,0 +1,9 @@ +// Copyright (C) 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +pub mod evm_network; diff --git a/sn_node_manager/src/bin/daemon/main.rs b/sn_node_manager/src/bin/daemon/main.rs index 99925943be..5de75e2904 100644 --- a/sn_node_manager/src/bin/daemon/main.rs +++ b/sn_node_manager/src/bin/daemon/main.rs @@ -27,16 +27,26 @@ use tonic::{transport::Server, Code, Request, Response, Status}; use tracing::Level; #[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] +#[command(disable_version_flag = true)] struct Args { - /// Specify a port for the daemon to listen for RPCs. It defaults to 12500 if not set. - #[clap(long, default_value_t = DAEMON_DEFAULT_PORT)] - port: u16, /// Specify an Ipv4Addr for the daemon to listen on. This is useful if you want to manage the nodes remotely. /// /// If not set, the daemon listens locally for commands. #[clap(long, default_value_t = Ipv4Addr::new(127, 0, 0, 1))] address: Ipv4Addr, + /// Print the crate version. + #[clap(long)] + pub crate_version: bool, + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + pub package_version: bool, + /// Specify a port for the daemon to listen for RPCs. It defaults to 12500 if not set. + #[clap(long, default_value_t = DAEMON_DEFAULT_PORT)] + port: u16, + /// Print version information. 
+ #[clap(long)] + version: bool, } struct SafeNodeManagerDaemon {} @@ -128,12 +138,35 @@ impl SafeNodeManagerDaemon {} #[tokio::main(flavor = "current_thread")] async fn main() -> Result<()> { + let args = Args::parse(); + + if args.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Node Manager RPC Daemon", + env!("CARGO_PKG_VERSION"), + None + ) + ); + return Ok(()); + } + + if args.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if args.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + let _log_handles = get_log_builder()?.initialize()?; println!("Starting safenodemand"); - let args = Args::parse(); let service = SafeNodeManagerDaemon {}; - // adding our service to our server. if let Err(err) = Server::builder() .add_service(SafeNodeManagerServer::new(service)) .serve(SocketAddr::new(IpAddr::V4(args.address), args.port)) diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index 5be4ef15b6..c83938137f 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -15,6 +15,7 @@ use crate::{ print_banner, status_report, VerbosityLevel, }; use color_eyre::{eyre::eyre, Help, Report, Result}; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -27,8 +28,8 @@ pub async fn join( build: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, @@ -39,6 +40,8 @@ pub async fn join( owner_prefix: Option, peers_args: PeersArgs, rpc_port: Option, + rewards_address: RewardsAddress, + evm_network: Option, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { @@ -58,15 +61,18 @@ pub async fn join( let mut 
local_node_registry = NodeRegistry::load(local_node_reg_path)?; let release_repo = ::default_config(); + + #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, - faucet_path, + _faucet_path, ReleaseType::Faucet, - faucet_version, + _faucet_version, &*release_repo, verbosity, ) .await?; + let safenode_bin_path = get_bin_path( build, node_path, @@ -94,6 +100,7 @@ pub async fn join( }; let options = LocalNetworkOptions { enable_metrics_server, + #[cfg(feature = "faucet")] faucet_bin_path, interval, join: true, @@ -107,6 +114,8 @@ pub async fn join( safenode_bin_path, skip_validation, log_format, + rewards_address, + evm_network, }; run_network(options, &mut local_node_registry, &ServiceController {}).await?; Ok(()) @@ -134,8 +143,8 @@ pub async fn run( clean: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, @@ -145,6 +154,8 @@ pub async fn run( owner: Option, owner_prefix: Option, rpc_port: Option, + rewards_address: RewardsAddress, + evm_network: Option, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { @@ -185,15 +196,18 @@ pub async fn run( info!("Launching local network"); let release_repo = ::default_config(); + + #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, - faucet_path, + _faucet_path, ReleaseType::Faucet, - faucet_version, + _faucet_version, &*release_repo, verbosity, ) .await?; + let safenode_bin_path = get_bin_path( build, node_path, @@ -206,6 +220,7 @@ pub async fn run( let options = LocalNetworkOptions { enable_metrics_server, + #[cfg(feature = "faucet")] faucet_bin_path, join: false, interval, @@ -219,6 +234,8 @@ pub async fn run( safenode_bin_path, skip_validation, log_format, + rewards_address, + evm_network, }; run_network(options, &mut local_node_registry, &ServiceController {}).await?; diff --git a/sn_node_manager/src/cmd/mod.rs 
b/sn_node_manager/src/cmd/mod.rs index ec4055a7a3..9e6af9351d 100644 --- a/sn_node_manager/src/cmd/mod.rs +++ b/sn_node_manager/src/cmd/mod.rs @@ -137,10 +137,8 @@ pub async fn get_bin_path( ) -> Result { if build { debug!("Obtaining bin path for {release_type:?} by building"); - build_binary(&release_type)?; - Ok(PathBuf::from("target") - .join("release") - .join(release_type.to_string())) + let target_dir = build_binary(&release_type)?; + Ok(target_dir.join(release_type.to_string())) } else if let Some(path) = path { debug!("Using the supplied custom binary for {release_type:?}: {path:?}"); Ok(path) @@ -159,7 +157,8 @@ pub async fn get_bin_path( } } -fn build_binary(bin_type: &ReleaseType) -> Result<()> { +// Returns the target dir after building the binary +fn build_binary(bin_type: &ReleaseType) -> Result { debug!("Building {bin_type} binary"); let mut args = vec!["build", "--release"]; let bin_name = bin_type.to_string(); @@ -178,8 +177,8 @@ fn build_binary(bin_type: &ReleaseType) -> Result<()> { if cfg!(feature = "otlp") { args.extend(["--features", "otlp"]); } - if cfg!(feature = "local-discovery") { - args.extend(["--features", "local-discovery"]); + if cfg!(feature = "local") { + args.extend(["--features", "local"]); } if cfg!(feature = "network-contacts") { args.extend(["--features", "network-contacts"]); @@ -193,12 +192,17 @@ fn build_binary(bin_type: &ReleaseType) -> Result<()> { print_banner(&format!("Building {} binary", bin_name)); + let mut target_dir = PathBuf::new(); let mut build_result = Command::new("cargo"); let _ = build_result.args(args.clone()); if let Ok(val) = std::env::var("CARGO_TARGET_DIR") { - let _ = build_result.env("CARGO_TARGET_DIR", val); + let _ = build_result.env("CARGO_TARGET_DIR", val.clone()); + target_dir.push(val); + } else { + target_dir.push("target"); } + let target_dir = target_dir.join("release"); let build_result = build_result .stdout(Stdio::inherit()) @@ -210,5 +214,5 @@ fn build_binary(bin_type: &ReleaseType) -> 
Result<()> { return Err(eyre!("Failed to build binaries")); } - Ok(()) + Ok(target_dir) } diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index ea30532c45..7d6a10871a 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -22,6 +22,7 @@ use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; use libp2p_identity::PeerId; use semver::Version; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -42,6 +43,7 @@ pub async fn add( data_dir_path: Option, enable_metrics_server: bool, env_variables: Option>, + evm_network: Option, home_network: bool, local: bool, log_dir_path: Option, @@ -53,6 +55,7 @@ pub async fn add( node_port: Option, owner: Option, peers_args: PeersArgs, + rewards_address: RewardsAddress, rpc_address: Option, rpc_port: Option, src_path: Option, @@ -142,6 +145,7 @@ pub async fn add( count, delete_safenode_src: src_path.is_none(), enable_metrics_server, + evm_network: evm_network.unwrap_or(EvmNetwork::ArbitrumOne), env_variables, genesis: is_first, home_network, @@ -153,6 +157,7 @@ pub async fn add( node_ip, node_port, owner, + rewards_address, rpc_address, rpc_port, safenode_src_path, @@ -605,6 +610,7 @@ pub async fn maintain_n_running_nodes( data_dir_path: Option, enable_metrics_server: bool, env_variables: Option>, + evm_network: Option, home_network: bool, local: bool, log_dir_path: Option, @@ -616,6 +622,7 @@ pub async fn maintain_n_running_nodes( node_port: Option, owner: Option, peers: PeersArgs, + rewards_address: RewardsAddress, rpc_address: Option, rpc_port: Option, src_path: Option, @@ -708,6 +715,7 @@ pub async fn maintain_n_running_nodes( data_dir_path.clone(), enable_metrics_server, env_variables.clone(), + evm_network.clone(), home_network, local, log_dir_path.clone(), @@ -719,6 +727,7 @@ pub async fn maintain_n_running_nodes( Some(PortRange::Single(port)), 
owner.clone(), peers.clone(), + rewards_address, rpc_address, rpc_port.clone(), src_path.clone(), diff --git a/sn_node_manager/src/helpers.rs b/sn_node_manager/src/helpers.rs index a841b54e6f..bd0ca2baae 100644 --- a/sn_node_manager/src/helpers.rs +++ b/sn_node_manager/src/helpers.rs @@ -276,7 +276,7 @@ pub async fn download_and_extract_release( } pub fn get_bin_version(bin_path: &PathBuf) -> Result { - trace!("Obtaining version of binary {bin_path:?}"); + debug!("Obtaining version of binary {bin_path:?}"); let mut cmd = Command::new(bin_path) .arg("--version") .stdout(Stdio::piped()) @@ -293,15 +293,28 @@ pub fn get_bin_version(bin_path: &PathBuf) -> Result { .read_to_string(&mut output) .inspect_err(|err| error!("Output contained non utf8 chars: {err:?}"))?; - let version = output - .split_whitespace() - .last() - .ok_or_else(|| { - error!("Failed to parse version"); - eyre!("Failed to parse version") - })? - .to_string(); - trace!("Obtained version of binary: {version}"); + // Extract the first line of the output + let first_line = output.lines().next().ok_or_else(|| { + error!("No output received from binary"); + eyre!("No output received from binary") + })?; + + let version = if let Some(v_pos) = first_line.find('v') { + // Stable binary: Extract version after 'v' + first_line[v_pos + 1..] 
+ .split_whitespace() + .next() + .map(String::from) + } else { + // Nightly binary: Extract the date at the end of the first line + first_line.split_whitespace().last().map(String::from) + } + .ok_or_else(|| { + error!("Failed to parse version from output"); + eyre!("Failed to parse version from output") + })?; + + debug!("Obtained version of binary: {version}"); Ok(version) } diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index aafc905096..721015ed2f 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -41,6 +41,7 @@ impl From for VerbosityLevel { use crate::error::{Error, Result}; use colored::Colorize; use semver::Version; +use sn_evm::AttoTokens; use sn_service_management::rpc::RpcActions; use sn_service_management::{ control::ServiceControl, error::Error as ServiceError, rpc::RpcClient, NodeRegistry, @@ -555,7 +556,7 @@ pub async fn refresh_node_registry( // exists. match HotWallet::try_load_from(&node.data_dir_path) { Ok(wallet) => { - node.reward_balance = Some(wallet.balance()); + node.reward_balance = Some(AttoTokens::from_u64(wallet.balance().as_nano())); trace!( "Wallet balance for node {}: {}", node.service_name, @@ -672,6 +673,7 @@ mod tests { use mockall::{mock, predicate::*}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; + use sn_evm::{AttoTokens, CustomNetwork, EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ error::{Error as ServiceControlError, Result as ServiceControlResult}, @@ -679,7 +681,6 @@ mod tests { rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, UpgradeOptions, UpgradeResult, }; - use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -767,6 +768,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + 
payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -782,7 +792,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -871,6 +884,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -888,7 +910,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -940,6 +965,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: 
"http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -957,7 +991,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1052,6 +1089,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1069,7 +1115,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1134,6 +1183,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + 
evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1149,7 +1207,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1226,6 +1287,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1241,7 +1311,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1317,6 +1390,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: 
EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1332,7 +1414,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1378,6 +1463,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1395,7 +1489,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1429,6 +1526,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: 
PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1444,7 +1550,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1478,6 +1587,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1495,7 +1613,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1530,6 +1651,15 @@ mod tests { auto_restart: false, connected_peers: None, 
data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1545,7 +1675,10 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1595,6 +1728,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1612,7 +1754,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1723,6 +1868,15 @@ mod tests { auto_restart: false, 
connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1740,7 +1894,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1813,6 +1970,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1830,7 +1996,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1948,6 +2117,15 @@ mod tests { auto_restart: false, 
connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1965,7 +2143,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2095,6 +2276,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2112,7 +2302,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2237,6 +2430,15 @@ mod tests { auto_restart: false, 
connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2254,7 +2456,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2380,6 +2585,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2397,7 +2611,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2497,6 +2714,7 @@ mod tests { OsString::from("--log-output-dest"), 
OsString::from("/var/log/safenode/safenode1"), OsString::from("--upnp"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -2553,6 +2771,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -2570,7 +2789,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2653,6 +2875,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--log-format"), OsString::from("json"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -2709,6 +2932,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -2726,7 +2950,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2812,6 +3039,7 @@ mod tests { OsString::from("--log-output-dest"), OsString::from("/var/log/safenode/safenode1"), OsString::from("--home-network"), + OsString::from("evm-arbitrum-one"), ], autostart: 
false, contents: None, @@ -2868,6 +3096,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: true, listen_addr: None, @@ -2885,7 +3114,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2968,6 +3200,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--ip"), OsString::from("192.168.1.1"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3024,6 +3257,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3041,7 +3275,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3127,6 +3364,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3183,6 +3421,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: 
PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3200,7 +3439,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3283,6 +3525,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--max-archived-log-files"), OsString::from("20"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3356,7 +3599,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3365,6 +3608,10 @@ mod tests { user: Some("safe".to_string()), user_mode: false, version: current_version.to_string(), + evm_network: EvmNetwork::ArbitrumOne, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); @@ -3442,6 +3689,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--max-log-files"), OsString::from("20"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3515,7 +3763,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3524,6 +3772,10 @@ mod tests { user: Some("safe".to_string()), user_mode: false, version: current_version.to_string(), + evm_network: EvmNetwork::ArbitrumOne, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); @@ -3598,6 +3850,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3654,6 +3907,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3671,7 +3925,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3757,6 +4014,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3813,6 +4071,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3830,7 +4089,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), 
pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3916,6 +4178,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3972,6 +4235,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3989,7 +4253,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4075,6 +4342,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("evm-arbitrum-one"), ], autostart: true, contents: None, @@ -4131,6 +4399,7 @@ mod tests { auto_restart: true, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -4148,7 +4417,186 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + 
"0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: "safenode1".to_string(), + status: ServiceStatus::Running, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: true, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + assert!(service_manager.service.service_data.auto_restart,); + + Ok(()) + } + + #[tokio::test] + async fn upgrade_should_retain_evm_network_settings() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control 
+ .expect_uninstall() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--owner"), + OsString::from("discord_username"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), + ], + autostart: true, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + 
.returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: true, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + number: 1, + owner: Some("discord_username".to_string()), + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4230,6 +4678,7 @@ mod tests { OsString::from("--log-output-dest"), OsString::from("/var/log/safenode/safenode1"), OsString::from("--upnp"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -4289,6 +4738,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -4306,7 +4756,10 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + rewards_address: 
RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4363,6 +4816,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4378,7 +4840,10 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, @@ -4422,6 +4887,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4439,7 +4913,10 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + 
reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4497,6 +4974,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4514,7 +5000,10 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4564,6 +5053,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4579,7 +5077,10 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + 
reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4629,6 +5130,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4644,7 +5154,10 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index ebff840cee..5796cda354 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,23 +8,29 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, - increment_port_option, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; + +#[cfg(feature = "faucet")] +use crate::helpers::get_username; +#[cfg(feature = "faucet")] +use sn_service_management::FaucetServiceData; +#[cfg(feature = "faucet")] +use sn_transfers::get_faucet_data_dir; + use color_eyre::eyre::OptionExt; use color_eyre::{eyre::eyre, Result}; use colored::Colorize; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; #[cfg(test)] use mockall::automock; - +use 
sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, - FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, + NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, @@ -36,7 +42,9 @@ use sysinfo::{Pid, System}; #[cfg_attr(test, automock)] pub trait Launcher { fn get_safenode_path(&self) -> PathBuf; + #[cfg(feature = "faucet")] fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result; + #[allow(clippy::too_many_arguments)] fn launch_node( &self, bootstrap_peers: Vec, @@ -45,12 +53,15 @@ pub trait Launcher { node_port: Option, owner: Option, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option, ) -> Result<()>; fn wait(&self, delay: u64); } #[derive(Default)] pub struct LocalSafeLauncher { + #[cfg(feature = "faucet")] pub faucet_bin_path: PathBuf, pub safenode_bin_path: PathBuf, } @@ -60,13 +71,24 @@ impl Launcher for LocalSafeLauncher { self.safenode_bin_path.clone() } + #[cfg(feature = "faucet")] fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result { info!("Launching the faucet server..."); + debug!("Using genesis_multiaddr: {}", genesis_multiaddr.to_string()); let args = vec![ "--peer".to_string(), genesis_multiaddr.to_string(), "server".to_string(), ]; + + #[cfg(feature = "faucet")] + debug!( + "Using faucet binary: {}", + self.faucet_bin_path.to_string_lossy() + ); + + debug!("Using args: {}", args.join(" ")); + let child = Command::new(self.faucet_bin_path.clone()) .args(args) .stdout(Stdio::inherit()) @@ -83,6 +105,8 @@ impl Launcher for LocalSafeLauncher { node_port: Option, owner: Option, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option, ) -> Result<()> { let mut args = Vec::new(); @@ -119,6 +143,22 @@ impl Launcher for LocalSafeLauncher { 
args.push("--rpc".to_string()); args.push(rpc_socket_addr.to_string()); + args.push("--rewards-address".to_string()); + args.push(rewards_address.to_string()); + + if let Some(network) = evm_network { + args.push(format!("evm-{}", network.identifier())); + + if let EvmNetwork::Custom(custom) = network { + args.push("--rpc-url".to_string()); + args.push(custom.rpc_url_http.to_string()); + args.push("--payment-token-address".to_string()); + args.push(custom.payment_token_address.to_string()); + args.push("--data-payments-address".to_string()); + args.push(custom.data_payments_address.to_string()); + } + } + Command::new(self.safenode_bin_path.clone()) .args(args) .stdout(Stdio::inherit()) @@ -190,13 +230,21 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res if !keep_directories { // At this point we don't allow path overrides, so deleting the data directory will clear // the log directory also. - std::fs::remove_dir_all(&node.data_dir_path)?; - debug!("Removed node data directory: {:?}", node.data_dir_path); - println!( - " {} Removed {}", - "✓".green(), - node.data_dir_path.to_string_lossy() - ); + if let Err(e) = std::fs::remove_dir_all(&node.data_dir_path) { + error!("Failed to remove node data directory: {:?}", e); + println!( + " {} Failed to remove {}: {e}", + "✗".red(), + node.data_dir_path.to_string_lossy() + ); + } else { + debug!("Removed node data directory: {:?}", node.data_dir_path); + println!( + " {} Removed {}", + "✓".green(), + node.data_dir_path.to_string_lossy() + ); + } } } @@ -205,6 +253,7 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res pub struct LocalNetworkOptions { pub enable_metrics_server: bool, + #[cfg(feature = "faucet")] pub faucet_bin_path: PathBuf, pub join: bool, pub interval: u64, @@ -218,6 +267,8 @@ pub struct LocalNetworkOptions { pub safenode_bin_path: PathBuf, pub skip_validation: bool, pub log_format: Option, + pub rewards_address: RewardsAddress, + pub 
evm_network: Option, } pub async fn run_network( @@ -245,6 +296,7 @@ pub async fn run_network( let launcher = LocalSafeLauncher { safenode_bin_path: options.safenode_bin_path.to_path_buf(), + #[cfg(feature = "faucet")] faucet_bin_path: options.faucet_bin_path.to_path_buf(), }; @@ -261,7 +313,7 @@ pub async fn run_network( .nodes .iter() .find_map(|n| n.listen_addr.clone()) - .ok_or_else(|| eyre!("Unable to obtain a peer to connect to"))?; + .ok_or_eyre("Unable to obtain a peer to connect to")?; (peer, 1) } } else { @@ -294,6 +346,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -341,6 +395,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -367,6 +423,7 @@ pub async fn run_network( validate_network(node_registry, bootstrap_peers.clone()).await?; } + #[cfg(feature = "faucet")] if !options.join { println!("Launching the faucet server..."); let pid = launcher.launch_faucet(&bootstrap_peers[0])?; @@ -397,6 +454,8 @@ pub struct RunNodeOptions { pub number: u16, pub owner: Option, pub rpc_socket_addr: SocketAddr, + pub rewards_address: RewardsAddress, + pub evm_network: Option, pub version: String, } @@ -414,6 +473,8 @@ pub async fn run_node( run_options.node_port, run_options.owner.clone(), run_options.rpc_socket_addr, + run_options.rewards_address, + run_options.evm_network.clone(), )?; launcher.wait(run_options.interval); @@ -431,6 +492,7 @@ pub async fn run_node( auto_restart: false, connected_peers, data_dir_path: node_info.data_path, + evm_network: run_options.evm_network.unwrap_or(EvmNetwork::ArbitrumOne), genesis: run_options.genesis, home_network: false, listen_addr: Some(listen_addrs), @@ -446,6 +508,7 @@ pub async 
fn run_node( owner: run_options.owner, peer_id: Some(peer_id), pid: Some(node_info.pid), + rewards_address: run_options.rewards_address, reward_balance: None, rpc_socket_addr: run_options.rpc_socket_addr, safenode_path: launcher.get_safenode_path(), @@ -527,6 +590,7 @@ mod tests { use libp2p_identity::PeerId; use mockall::mock; use mockall::predicate::*; + use sn_evm::utils::dummy_address; use sn_service_management::{ error::Result as RpcResult, rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, @@ -552,6 +616,7 @@ mod tests { async fn run_node_should_launch_the_genesis_node() -> Result<()> { let mut mock_launcher = MockLauncher::new(); let mut mock_rpc_client = MockRpcClient::new(); + let rewards_address = dummy_address(); let peer_id = PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?; let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 13000); @@ -564,9 +629,11 @@ mod tests { eq(None), eq(None), eq(rpc_socket_addr), + eq(rewards_address), + eq(None), ) .times(1) - .returning(|_, _, _, _, _, _| Ok(())); + .returning(|_, _, _, _, _, _, _, _| Ok(())); mock_launcher .expect_wait() .with(eq(100)) @@ -612,6 +679,8 @@ mod tests { number: 1, owner: None, rpc_socket_addr, + rewards_address, + evm_network: None, version: "0.100.12".to_string(), }, &mock_launcher, diff --git a/sn_node_manager/src/rpc.rs b/sn_node_manager/src/rpc.rs index b9fc50ced8..57147ccce4 100644 --- a/sn_node_manager/src/rpc.rs +++ b/sn_node_manager/src/rpc.rs @@ -66,6 +66,7 @@ pub async fn restart_node_service( bootstrap_peers: node_registry.bootstrap_peers.clone(), data_dir_path: current_node_clone.data_dir_path.clone(), env_variables: node_registry.environment_variables.clone(), + evm_network: current_node_clone.evm_network.clone(), genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, local: current_node_clone.local, @@ -78,6 +79,7 @@ pub async fn restart_node_service( name: 
current_node_clone.service_name.clone(), node_ip: current_node_clone.node_ip, node_port: current_node_clone.get_safenode_port(), + rewards_address: current_node_clone.rewards_address, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path: current_node_clone.safenode_path.clone(), service_user: current_node_clone.user.clone(), @@ -184,6 +186,7 @@ pub async fn restart_node_service( bootstrap_peers: node_registry.bootstrap_peers.clone(), data_dir_path: data_dir_path.clone(), env_variables: node_registry.environment_variables.clone(), + evm_network: current_node_clone.evm_network.clone(), genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, local: current_node_clone.local, @@ -196,6 +199,7 @@ pub async fn restart_node_service( node_ip: current_node_clone.node_ip, node_port: None, owner: None, + rewards_address: current_node_clone.rewards_address, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path: safenode_path.clone(), service_user: current_node_clone.user.clone(), @@ -210,6 +214,7 @@ pub async fn restart_node_service( auto_restart: current_node_clone.auto_restart, connected_peers: None, data_dir_path, + evm_network: current_node_clone.evm_network, genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, listen_addr: None, @@ -225,6 +230,7 @@ pub async fn restart_node_service( owner: None, peer_id: None, pid: None, + rewards_address: current_node_clone.rewards_address, reward_balance: current_node_clone.reward_balance, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path, diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index f1c6aaa814..8316e1ea87 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,12 +8,15 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.31" +version = "0.6.32" [[bin]] name = "safenode_rpc_client" 
path = "src/main.rs" +[features] +nightly = [] + [dependencies] assert_fs = "1.0.0" async-trait = "0.1" @@ -21,15 +24,15 @@ bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.2" hex = "~0.4.3" -libp2p = { version="0.53", features = ["kad"]} +libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_node = { path = "../sn_node", version = "0.111.4" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_node = { path = "../sn_node", version = "0.112.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_node_rpc_client/src/main.rs b/sn_node_rpc_client/src/main.rs index 7d019bff95..7930a3b712 100644 --- a/sn_node_rpc_client/src/main.rs +++ b/sn_node_rpc_client/src/main.rs @@ -9,26 +9,35 @@ use clap::Parser; use color_eyre::eyre::Result; - use sn_logging::{Level, LogBuilder}; use sn_node::NodeEvent; - use sn_protocol::safenode_proto::{safe_node_client::SafeNodeClient, NodeEventsRequest}; - use 
sn_service_management::rpc::{RpcActions, RpcClient}; - use std::{net::SocketAddr, time::Duration}; use tokio_stream::StreamExt; use tonic::Request; #[derive(Parser, Debug)] -#[clap(version, name = "safenode RPC client")] +#[command(disable_version_flag = true)] struct Opt { /// Address of the node's RPC service, e.g. 127.0.0.1:12001. addr: SocketAddr, /// subcommands #[clap(subcommand)] cmd: Cmd, + + /// Print the crate version. + #[clap(long)] + crate_version: bool, + + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + package_version: bool, + + /// Print version information. + #[clap(long)] + version: bool, } #[derive(Parser, Debug)] @@ -90,6 +99,29 @@ async fn main() -> Result<()> { let _log_appender_guard = LogBuilder::new(logging_targets).initialize()?; let opt = Opt::parse(); + + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Node RPC Client", + env!("CARGO_PKG_VERSION"), + None + ) + ); + } + + if opt.crate_version { + println!("Crate version: {}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("Package version: {}", sn_build_info::package_version()); + return Ok(()); + } + let addr = opt.addr; match opt.cmd { diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 6e94f6eff0..c8e46ee8be 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,22 +8,22 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.3" +version = "0.5.4" [features] -local-discovery = [] +local = [] network-contacts = ["sn_protocol"] websockets = [] [dependencies] clap = { version = "4.2.1", features = ["derive", "env"] } lazy_static = "~1.4.0" -libp2p = { version="0.53", features = [] } +libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", 
default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.12", optional = true} thiserror = "1.0.23" -tokio = { version = "1.32.0", default-features = false} +tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } url = { version = "2.4.0" } diff --git a/sn_peers_acquisition/src/error.rs b/sn_peers_acquisition/src/error.rs index 1e9f4b7da2..d5df7c969b 100644 --- a/sn_peers_acquisition/src/error.rs +++ b/sn_peers_acquisition/src/error.rs @@ -5,7 +5,7 @@ pub type Result = std::result::Result; #[derive(Debug, Error)] pub enum Error { #[error("Could not parse the supplied multiaddr or socket address")] - InvalidPeerAddr, + InvalidPeerAddr(#[from] libp2p::multiaddr::Error), #[error("Could not obtain network contacts from {0} after {1} retries")] FailedToObtainPeersFromUrl(String, usize), #[error("No valid multaddr was present in the contacts file at {0}")] diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs index 10a7b1a775..8c39764d96 100644 --- a/sn_peers_acquisition/src/lib.rs +++ b/sn_peers_acquisition/src/lib.rs @@ -15,8 +15,6 @@ use lazy_static::lazy_static; use libp2p::{multiaddr::Protocol, Multiaddr}; use rand::{seq::SliceRandom, thread_rng}; use reqwest::Client; -#[cfg(feature = "network-contacts")] -use sn_protocol::version::get_network_version; use std::time::Duration; use tracing::*; use url::Url; @@ -24,11 +22,8 @@ use url::Url; #[cfg(feature = "network-contacts")] lazy_static! { // URL containing the multi-addresses of the bootstrap nodes. 
- pub static ref NETWORK_CONTACTS_URL: String = { - let version = get_network_version(); - let version_prefix = if !version.is_empty() { format!("{version}-") } else { version.to_string() }; - format!("https://sn-testnet.s3.eu-west-2.amazonaws.com/{version_prefix}network-contacts") - }; + pub static ref NETWORK_CONTACTS_URL: String = + "https://sn-testnet.s3.eu-west-2.amazonaws.com/network-contacts".to_string(); } // The maximum number of retries to be performed while trying to get peers from a URL. @@ -60,7 +55,7 @@ pub struct PeersArgs { /// Specify the URL to fetch the network contacts from. /// - /// This argument will be overridden if the "peers" argument is set or if the `local-discovery` + /// This argument will be overridden if the "peers" argument is set or if the `local` /// feature flag is enabled. #[cfg(feature = "network-contacts")] #[clap(long, conflicts_with = "first")] @@ -75,7 +70,7 @@ impl PeersArgs { /// Otherwise, peers are obtained in the following order of precedence: /// * The `--peer` argument. /// * The `SAFE_PEERS` environment variable. - /// * Using the `local-discovery` feature, which will return an empty peer list. + /// * Using the `local` feature, which will return an empty peer list. /// * Using the `network-contacts` feature, which will download the peer list from a file on S3. /// /// Note: the current behaviour is that `--peer` and `SAFE_PEERS` will be combined. Some tests @@ -91,7 +86,7 @@ impl PeersArgs { /// Otherwise, peers are obtained in the following order of precedence: /// * The `--peer` argument. /// * The `SAFE_PEERS` environment variable. - /// * Using the `local-discovery` feature, which will return an empty peer list. + /// * Using the `local` feature, which will return an empty peer list. /// /// This will not fetch the peers from network-contacts even if the `network-contacts` feature is enabled. Use /// get_peers() instead. 
@@ -111,11 +106,9 @@ impl PeersArgs { let mut peers = if !self.peers.is_empty() { info!("Using peers supplied with the --peer argument(s) or SAFE_PEERS"); self.peers - } else if cfg!(feature = "local-discovery") { + } else if cfg!(feature = "local") { info!("No peers given"); - info!( - "The `local-discovery` feature is enabled, so peers will be discovered through mDNS." - ); + info!("The `local` feature is enabled, so peers will be discovered through mDNS."); return Ok(vec![]); } else if skip_network_contacts { info!("Skipping network contacts"); @@ -159,7 +152,7 @@ impl PeersArgs { } /// Parse strings like `1.2.3.4:1234` and `/ip4/1.2.3.4/tcp/1234` into a multiaddr. -pub fn parse_peer_addr(addr: &str) -> Result { +pub fn parse_peer_addr(addr: &str) -> std::result::Result { // Parse valid IPv4 socket address, e.g. `1.2.3.4:1234`. if let Ok(addr) = addr.parse::() { let start_addr = Multiaddr::from(*addr.ip()); @@ -180,12 +173,7 @@ pub fn parse_peer_addr(addr: &str) -> Result { } // Parse any valid multiaddr string - if let Ok(addr) = addr.parse::() { - debug!("Parsing a full multiaddr: {:?}", addr); - return Ok(addr); - } - - Err(Error::InvalidPeerAddr) + addr.parse::() } /// Get and parse a list of peers from a URL. The URL should contain one multiaddr per line. 
@@ -249,6 +237,6 @@ pub async fn get_peers_from_url(url: Url) -> Result> { trace!( "Failed to get peers from URL, retrying {retries}/{MAX_RETRIES_ON_GET_PEERS_FROM_URL}" ); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index cd100a22ab..622ed3dd4d 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.11" +version = "0.17.12" [features] default = [] @@ -23,14 +23,15 @@ custom_debug = "~0.6.1" dirs-next = "~2.0.0" hex = "~0.4.3" lazy_static = "1.4.0" -libp2p = { version="0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } @@ -41,6 +42,7 @@ prost = { version = "0.9" , optional=true } tonic = { version = "0.6.2", optional=true, default-features = false, features = ["prost", "tls", "codegen"]} xor_name = "5.0.0" + [build-dependencies] # watch out updating this, protoc compiler needs to be installed on all build systems # arm builds + musl are very problematic diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs index 2e481a67ce..f73c356b53 100644 --- 
a/sn_protocol/src/error.rs +++ b/sn_protocol/src/error.rs @@ -44,6 +44,14 @@ pub enum Error { key: Box, }, + // ---------- Scratchpad errors + /// The provided String can't be deserialized as a RegisterAddress + #[error("Failed to deserialize hex ScratchpadAddress")] + ScratchpadHexDeserializeFailed, + /// The provided SecretyKey failed to decrypt the data + #[error("Failed to derive CipherText from encrypted_data")] + ScratchpadCipherTextFailed, + // ---------- payment errors #[error("There was an error getting the storecost from kademlia store")] GetStoreCostFailed, diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs index 900179bed1..4d3b92628d 100644 --- a/sn_protocol/src/lib.rs +++ b/sn_protocol/src/lib.rs @@ -29,6 +29,7 @@ pub mod safenode_proto { tonic::include_proto!("safenode_proto"); } pub use error::Error; +use storage::ScratchpadAddress; use self::storage::{ChunkAddress, RegisterAddress, SpendAddress}; use bytes::Bytes; @@ -82,6 +83,8 @@ pub enum NetworkAddress { RegisterAddress(RegisterAddress), /// The NetworkAddress is representing a RecordKey. RecordKey(Bytes), + /// The NetworkAddress is representing a ScratchpadAddress. + ScratchpadAddress(ScratchpadAddress), } impl NetworkAddress { @@ -94,6 +97,10 @@ impl NetworkAddress { pub fn from_spend_address(cash_note_address: SpendAddress) -> Self { NetworkAddress::SpendAddress(cash_note_address) } + /// Return a `NetworkAddress` representation of the `SpendAddress`. + pub fn from_scratchpad_address(address: ScratchpadAddress) -> Self { + NetworkAddress::ScratchpadAddress(address) + } /// Return a `NetworkAddress` representation of the `RegisterAddress`. 
pub fn from_register_address(register_address: RegisterAddress) -> Self { @@ -118,6 +125,7 @@ impl NetworkAddress { NetworkAddress::SpendAddress(cash_note_address) => { cash_note_address.xorname().0.to_vec() } + NetworkAddress::ScratchpadAddress(addr) => addr.xorname().0.to_vec(), NetworkAddress::RegisterAddress(register_address) => { register_address.xorname().0.to_vec() } @@ -141,6 +149,7 @@ impl NetworkAddress { NetworkAddress::SpendAddress(cash_note_address) => Some(*cash_note_address.xorname()), NetworkAddress::ChunkAddress(chunk_address) => Some(*chunk_address.xorname()), NetworkAddress::RegisterAddress(register_address) => Some(register_address.xorname()), + NetworkAddress::ScratchpadAddress(address) => Some(address.xorname()), _ => None, } } @@ -164,6 +173,7 @@ impl NetworkAddress { NetworkAddress::SpendAddress(cash_note_address) => { RecordKey::new(cash_note_address.xorname()) } + NetworkAddress::ScratchpadAddress(addr) => RecordKey::new(&addr.xorname()), NetworkAddress::PeerId(bytes) => RecordKey::new(bytes), } } @@ -216,6 +226,12 @@ impl Debug for NetworkAddress { &spend_address.to_hex()[0..6] ) } + NetworkAddress::ScratchpadAddress(scratchpad_address) => { + format!( + "NetworkAddress::ScratchpadAddress({} - ", + &scratchpad_address.to_hex()[0..6] + ) + } NetworkAddress::RegisterAddress(register_address) => format!( "NetworkAddress::RegisterAddress({} - ", ®ister_address.to_hex()[0..6] @@ -245,6 +261,9 @@ impl Display for NetworkAddress { NetworkAddress::SpendAddress(addr) => { write!(f, "NetworkAddress::SpendAddress({addr:?})") } + NetworkAddress::ScratchpadAddress(addr) => { + write!(f, "NetworkAddress::ScratchpadAddress({addr:?})") + } NetworkAddress::RegisterAddress(addr) => { write!(f, "NetworkAddress::RegisterAddress({addr:?})") } @@ -375,62 +394,10 @@ impl<'a> std::fmt::Debug for PrettyPrintRecordKey<'a> { #[cfg(test)] mod tests { - use crate::{NetworkAddress, PrettyPrintRecordKey}; + use crate::NetworkAddress; use bls::rand::thread_rng; - use 
bytes::Bytes; - use libp2p::kad::{KBucketKey, RecordKey}; use sn_transfers::SpendAddress; - // A struct that implements hex representation of RecordKey using `bytes::Bytes` - struct OldRecordKeyPrint(RecordKey); - - // old impl using Bytes - impl std::fmt::Display for OldRecordKeyPrint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let b: Vec = self.0.as_ref().to_vec(); - let record_key_b = Bytes::from(b); - let record_key_str = &format!("{record_key_b:64x}")[0..6]; // only the first 6 chars are logged - write!( - f, - "{record_key_str}({:?})", - OldKBucketKeyPrint(NetworkAddress::from_record_key(&self.0).as_kbucket_key()) - ) - } - } - - impl std::fmt::Debug for OldRecordKeyPrint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self}") - } - } - - // A struct that implements hex representation of KBucketKey using `bytes::Bytes` - pub struct OldKBucketKeyPrint(KBucketKey>); - - // old impl using Bytes - impl std::fmt::Display for OldKBucketKeyPrint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let kbucket_key_b = Bytes::from(self.0.hashed_bytes().to_vec()); - write!(f, "{kbucket_key_b:64x}") - } - } - - impl std::fmt::Debug for OldKBucketKeyPrint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self}") - } - } - - #[test] - fn verify_custom_hex_representation() { - let random = xor_name::XorName::random(&mut thread_rng()); - let key = RecordKey::new(&random.0); - let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); - let old_record_key = OldRecordKeyPrint(key); - - assert_eq!(format!("{pretty_key:?}"), format!("{old_record_key:?}")); - } - #[test] fn verify_spend_addr_is_actionable() { let xorname = xor_name::XorName::random(&mut thread_rng()); diff --git a/sn_protocol/src/messages.rs b/sn_protocol/src/messages.rs index 1cdab98f2e..cbef76ab90 100644 --- a/sn_protocol/src/messages.rs +++ b/sn_protocol/src/messages.rs @@ 
-16,7 +16,7 @@ mod response; pub use self::{ chunk_proof::{ChunkProof, Nonce}, - cmd::{Cmd, Hash}, + cmd::Cmd, node_id::NodeId, query::Query, register::RegisterCmd, diff --git a/sn_protocol/src/messages/cmd.rs b/sn_protocol/src/messages/cmd.rs index 094d93cae4..a9618ba3f8 100644 --- a/sn_protocol/src/messages/cmd.rs +++ b/sn_protocol/src/messages/cmd.rs @@ -9,8 +9,7 @@ use crate::{storage::RecordType, NetworkAddress}; use serde::{Deserialize, Serialize}; -// TODO: remove this dependency and define these types herein. -pub use sn_transfers::{Hash, PaymentQuote}; +pub use sn_evm::PaymentQuote; /// Data and CashNote cmds - recording spends or creating, updating, and removing data. /// diff --git a/sn_protocol/src/messages/response.rs b/sn_protocol/src/messages/response.rs index 28fb8035f3..17c986f581 100644 --- a/sn_protocol/src/messages/response.rs +++ b/sn_protocol/src/messages/response.rs @@ -12,7 +12,7 @@ use super::ChunkProof; use bytes::Bytes; use core::fmt; use serde::{Deserialize, Serialize}; -use sn_transfers::{MainPubkey, PaymentQuote}; +use sn_evm::{PaymentQuote, RewardsAddress}; use std::fmt::Debug; /// The response to a query, containing the query result. @@ -26,8 +26,8 @@ pub enum QueryResponse { GetStoreCost { /// The store cost quote for storing the next record. quote: Result, - /// The cash_note MainPubkey to pay this node's store cost to. - payment_address: MainPubkey, + /// The rewards address to pay this node's store cost to. + payment_address: RewardsAddress, /// Node's Peer Address peer_address: NetworkAddress, }, diff --git a/sn_protocol/src/node_rpc.rs b/sn_protocol/src/node_rpc.rs index 599e874221..d35ddac5b4 100644 --- a/sn_protocol/src/node_rpc.rs +++ b/sn_protocol/src/node_rpc.rs @@ -15,7 +15,7 @@ pub enum NodeCtrl { /// Request to stop the execution of the safenode app, providing an error as a reason for it. 
Stop { delay: Duration, - cause: Error, + result: StopResult, }, /// Request to restart the execution of the safenode app, retrying to join the network, after the requested delay. /// Set `retain_peer_id` to `true` if you want to re-use the same root dir/secret keys/PeerId. @@ -26,3 +26,9 @@ pub enum NodeCtrl { // Request to update the safenode app, and restart it, after the requested delay. Update(Duration), } + +#[derive(Debug)] +pub enum StopResult { + Success(String), + Error(Error), +} diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs index 3138c1011a..2935e43fce 100644 --- a/sn_protocol/src/storage.rs +++ b/sn_protocol/src/storage.rs @@ -9,15 +9,17 @@ mod address; mod chunks; mod header; +mod scratchpad; use crate::error::Error; use core::fmt; use std::{str::FromStr, time::Duration}; pub use self::{ - address::{ChunkAddress, RegisterAddress, SpendAddress}, + address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress}, chunks::Chunk, header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType}, + scratchpad::Scratchpad, }; /// Represents the strategy for retrying operations. This encapsulates both the duration it may take for an operation to @@ -25,11 +27,12 @@ pub use self::{ /// Chunk/Registers/Spend to be more flexible. /// /// The Duration/Attempts is chosen based on the internal logic. -#[derive(Clone, Debug, Copy)] +#[derive(Clone, Debug, Copy, Default)] pub enum RetryStrategy { /// Quick: Resolves to a 15-second wait or 1 retry attempt. Quick, /// Balanced: Resolves to a 60-second wait or 3 retry attempt. + #[default] Balanced, /// Persistent: Resolves to a 180-second wait or 6 retry attempt. 
Persistent, diff --git a/sn_protocol/src/storage/address.rs b/sn_protocol/src/storage/address.rs index 1c00f75dfb..a076b97748 100644 --- a/sn_protocol/src/storage/address.rs +++ b/sn_protocol/src/storage/address.rs @@ -7,7 +7,9 @@ // permissions and limitations relating to use of the SAFE Network Software. mod chunk; +mod scratchpad; pub use self::chunk::ChunkAddress; +pub use self::scratchpad::ScratchpadAddress; pub use sn_registers::RegisterAddress; pub use sn_transfers::SpendAddress; diff --git a/sn_protocol/src/storage/address/scratchpad.rs b/sn_protocol/src/storage/address/scratchpad.rs new file mode 100644 index 0000000000..ecd9735183 --- /dev/null +++ b/sn_protocol/src/storage/address/scratchpad.rs @@ -0,0 +1,90 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ +use crate::error::{Error, Result}; +use bls::PublicKey; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; +use xor_name::XorName; + +/// Address of a Scratchpad on the SAFE Network +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub struct ScratchpadAddress { + /// Owner of the scratchpad + pub(crate) owner: PublicKey, +} + +impl Display for ScratchpadAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "({:?})", &self.to_hex()[0..6]) + } +} + +impl Debug for ScratchpadAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "ScratchpadAddress({}) {{ owner: {:?} }}", + &self.to_hex()[0..6], + self.owner + ) + } +} + +impl ScratchpadAddress { + /// Construct a new `ScratchpadAddress` given `owner`. + pub fn new(owner: PublicKey) -> Self { + Self { owner } + } + + /// Return the network name of the scratchpad. + /// This is used to locate the scratchpad on the network. + pub fn xorname(&self) -> XorName { + XorName::from_content(&self.owner.to_bytes()) + } + + /// Serialize this `ScratchpadAddress` instance to a hex-encoded `String`. + pub fn to_hex(&self) -> String { + hex::encode(self.owner.to_bytes()) + } + + /// Deserialize a hex-encoded representation of a `ScratchpadAddress` to a `ScratchpadAddress` instance. + pub fn from_hex(hex: &str) -> Result { + // let bytes = hex::decode(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; + let owner = PublicKey::from_hex(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; + Ok(Self { owner }) + } + + /// Return the owner. 
+ pub fn owner(&self) -> &PublicKey { + &self.owner + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::SecretKey; + + #[test] + fn test_scratchpad_hex_conversion() { + let owner = SecretKey::random().public_key(); + let addr = ScratchpadAddress::new(owner); + let hex = addr.to_hex(); + let addr2 = ScratchpadAddress::from_hex(&hex).unwrap(); + + assert_eq!(addr, addr2); + + let bad_hex = format!("{hex}0"); + let err = ScratchpadAddress::from_hex(&bad_hex); + assert_eq!(err, Err(Error::ScratchpadHexDeserializeFailed)); + } +} diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs index 0a7cc7dd71..96a4515526 100644 --- a/sn_protocol/src/storage/header.rs +++ b/sn_protocol/src/storage/header.rs @@ -21,6 +21,7 @@ use xor_name::XorName; #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub enum RecordType { Chunk, + Scratchpad, NonChunk(XorName), } @@ -36,6 +37,8 @@ pub enum RecordKind { Spend, Register, RegisterWithPayment, + Scratchpad, + ScratchpadWithPayment, } impl Serialize for RecordKind { @@ -49,6 +52,8 @@ impl Serialize for RecordKind { Self::Spend => serializer.serialize_u32(2), Self::Register => serializer.serialize_u32(3), Self::RegisterWithPayment => serializer.serialize_u32(4), + Self::Scratchpad => serializer.serialize_u32(5), + Self::ScratchpadWithPayment => serializer.serialize_u32(6), } } } @@ -65,6 +70,8 @@ impl<'de> Deserialize<'de> for RecordKind { 2 => Ok(Self::Spend), 3 => Ok(Self::Register), 4 => Ok(Self::RegisterWithPayment), + 5 => Ok(Self::Scratchpad), + 6 => Ok(Self::ScratchpadWithPayment), _ => Err(serde::de::Error::custom( "Unexpected integer for RecordKind variant", )), @@ -185,6 +192,18 @@ mod tests { .try_serialize()?; assert_eq!(register.len(), RecordHeader::SIZE); + let scratchpad = RecordHeader { + kind: RecordKind::Scratchpad, + } + .try_serialize()?; + assert_eq!(scratchpad.len(), RecordHeader::SIZE); + + let scratchpad_with_payment = RecordHeader { + kind: 
RecordKind::ScratchpadWithPayment, + } + .try_serialize()?; + assert_eq!(scratchpad_with_payment.len(), RecordHeader::SIZE); + Ok(()) } } diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs new file mode 100644 index 0000000000..ea38d2e686 --- /dev/null +++ b/sn_protocol/src/storage/scratchpad.rs @@ -0,0 +1,135 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use super::ScratchpadAddress; +use crate::error::{Error, Result}; +use crate::NetworkAddress; +use bls::{Ciphertext, PublicKey, SecretKey, Signature}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; + +use xor_name::XorName; + +/// Scratchpad, an mutable address for encrypted data +#[derive( + Hash, Eq, PartialEq, PartialOrd, Ord, Clone, custom_debug::Debug, Serialize, Deserialize, +)] +pub struct Scratchpad { + /// Network address. Omitted when serialising and + /// calculated from the `encrypted_data` when deserialising. + address: ScratchpadAddress, + /// Contained data. This should be encrypted + #[debug(skip)] + encrypted_data: Bytes, + /// Monotonically increasing counter to track the number of times this has been updated. + counter: u64, + /// Signature over `Vec`.extend(Xorname::from_content(encrypted_data).to_vec()) from the owning key. + /// Required for scratchpad to be valid. + signature: Option, +} + +impl Scratchpad { + /// Creates a new instance of `Scratchpad`. 
+ pub fn new(owner: PublicKey) -> Self { + Self { + address: ScratchpadAddress::new(owner), + encrypted_data: Bytes::new(), + counter: 0, + signature: None, + } + } + + /// Return the current count + pub fn count(&self) -> u64 { + self.counter + } + + /// Increments the counter value. + pub fn increment(&mut self) -> u64 { + self.counter += 1; + + self.counter + } + + /// Returns the next counter value, + /// + /// Encrypts data and updates the signature with provided sk + pub fn update_and_sign(&mut self, unencrypted_data: Bytes, sk: &SecretKey) -> u64 { + let next_count = self.increment(); + + let pk = self.owner(); + + self.encrypted_data = Bytes::from(pk.encrypt(unencrypted_data).to_bytes()); + + let encrypted_data_xorname = self.encrypted_data_hash().to_vec(); + + let mut bytes_to_sign = self.counter.to_be_bytes().to_vec(); + bytes_to_sign.extend(encrypted_data_xorname); + + self.signature = Some(sk.sign(&bytes_to_sign)); + next_count + } + + /// Verifies the signature and content of the scratchpad are valid for the + /// owner's public key. + pub fn is_valid(&self) -> bool { + if let Some(signature) = &self.signature { + let mut signing_bytes = self.counter.to_be_bytes().to_vec(); + signing_bytes.extend(self.encrypted_data_hash().to_vec()); // add the count + + self.owner().verify(signature, &signing_bytes) + } else { + false + } + } + + /// Returns the encrypted_data. + pub fn encrypted_data(&self) -> &Bytes { + &self.encrypted_data + } + + /// Returns the encrypted_data, decrypted via the passed SecretKey + pub fn decrypt_data(&self, sk: &SecretKey) -> Result> { + Ok(sk + .decrypt( + &Ciphertext::from_bytes(&self.encrypted_data) + .map_err(|_| Error::ScratchpadCipherTextFailed)?, + ) + .map(Bytes::from)) + } + + /// Returns the encrypted_data hash + pub fn encrypted_data_hash(&self) -> XorName { + XorName::from_content(&self.encrypted_data) + } + + /// Returns the owner. 
+ pub fn owner(&self) -> &PublicKey { + self.address.owner() + } + + /// Returns the address. + pub fn address(&self) -> &ScratchpadAddress { + &self.address + } + + /// Returns the NetworkAddress + pub fn network_address(&self) -> NetworkAddress { + NetworkAddress::ScratchpadAddress(self.address) + } + + /// Returns the name. + pub fn name(&self) -> XorName { + self.address.xorname() + } + + /// Returns size of contained encrypted_data. + pub fn payload_size(&self) -> usize { + self.encrypted_data.len() + } +} diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index b507c2d725..04921730ef 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -7,14 +7,13 @@ // permissions and limitations relating to use of the SAFE Network Software. use lazy_static::lazy_static; -use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK, PAYMENT_FORWARD_PK}; +use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK}; lazy_static! { /// The node version used during Identify Behaviour. pub static ref IDENTIFY_NODE_VERSION_STR: String = format!( - "safe{}/node/{}/{}", - write_network_version_with_slash(), + "safe/node/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -22,8 +21,7 @@ lazy_static! { /// The client version used during Identify Behaviour. pub static ref IDENTIFY_CLIENT_VERSION_STR: String = format!( - "safe{}/client/{}/{}", - write_network_version_with_slash(), + "safe/client/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -31,8 +29,7 @@ lazy_static! { /// The req/response protocol version pub static ref REQ_RESPONSE_VERSION_STR: String = format!( - "/safe{}/node/{}/{}", - write_network_version_with_slash(), + "/safe/node/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -40,42 +37,12 @@ lazy_static! 
{ /// The identify protocol version pub static ref IDENTIFY_PROTOCOL_STR: String = format!( - "safe{}/{}/{}", - write_network_version_with_slash(), + "safe/{}/{}", get_truncate_version_str(), get_key_version_str(), ); } -/// Get the network version string. -/// If the network version mode env variable is set to `restricted`, then the git branch is used as the version. -/// Else any non empty string is used as the version string. -/// If the env variable is empty or not set, then we do not apply any network versioning. -pub fn get_network_version() -> &'static str { - // Set this env variable to provide custom network versioning. If it is set to 'restricted', then the git branch name - // is used as the version string. Else we directly use the passed in string as the version. - match option_env!("NETWORK_VERSION_MODE") { - Some(value) => { - if value == "restricted" { - sn_build_info::git_branch() - } else { - value - } - } - _ => "", - } -} - -/// Helper to write the network version with `/` appended if it is not empty -fn write_network_version_with_slash() -> String { - let version = get_network_version(); - if version.is_empty() { - version.to_string() - } else { - format!("/{version}") - } -} - // Protocol support shall be downward compatible for patch only version update. // i.e. 
versions of `A.B.X` or `A.B.X-alpha.Y` shall be considered as a same protocol of `A.B` fn get_truncate_version_str() -> String { @@ -98,7 +65,5 @@ fn get_key_version_str() -> String { let _ = g_k_str.split_off(6); let mut n_k_str = NETWORK_ROYALTIES_PK.to_hex(); let _ = n_k_str.split_off(6); - let mut p_k_str = PAYMENT_FORWARD_PK.to_hex(); - let _ = p_k_str.split_off(6); - format!("{f_k_str}_{g_k_str}_{n_k_str}_{p_k_str}") + format!("{f_k_str}_{g_k_str}_{n_k_str}") } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index cfdaaccc5f..fd68714064 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.21" +version = "0.4.0" [features] test-utils = [] diff --git a/sn_registers/src/address.rs b/sn_registers/src/address.rs index d0cdacb0ba..f8f2c346a1 100644 --- a/sn_registers/src/address.rs +++ b/sn_registers/src/address.rs @@ -26,8 +26,9 @@ pub struct RegisterAddress { } impl Display for RegisterAddress { + /// Display the register address in hex format that can be parsed by `RegisterAddress::from_hex`. 
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}({:?})", &self.to_hex()[0..6], self.xorname()) + write!(f, "{}", &self.to_hex()) } } diff --git a/sn_registers/src/lib.rs b/sn_registers/src/lib.rs index 2fb85cd71f..e9cc34e4f0 100644 --- a/sn_registers/src/lib.rs +++ b/sn_registers/src/lib.rs @@ -19,6 +19,7 @@ pub use self::{ error::Error, metadata::{Entry, EntryHash}, permissions::Permissions, + reg_crdt::RegisterCrdt, register::{Register, SignedRegister}, register_op::RegisterOp, }; diff --git a/sn_registers/src/reg_crdt.rs b/sn_registers/src/reg_crdt.rs index 844b3bfce3..f93002aefc 100644 --- a/sn_registers/src/reg_crdt.rs +++ b/sn_registers/src/reg_crdt.rs @@ -9,17 +9,21 @@ use crate::{error::Result, Entry, EntryHash, Error, RegisterAddress, RegisterOp}; use crdts::merkle_reg::Node as MerkleDagEntry; -use crdts::{merkle_reg::MerkleReg, CmRDT, CvRDT}; +use crdts::{ + merkle_reg::{Hash as CrdtHash, MerkleReg}, + CmRDT, CvRDT, +}; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeSet, + collections::{BTreeSet, HashSet}, fmt::{self, Debug, Display, Formatter}, hash::Hash, }; +use xor_name::XorName; /// Register data type as a CRDT with Access Control #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd)] -pub(crate) struct RegisterCrdt { +pub struct RegisterCrdt { /// Address on the network of this piece of data address: RegisterAddress, /// CRDT to store the actual data, i.e. the items of the Register. @@ -41,7 +45,7 @@ impl Display for RegisterCrdt { impl RegisterCrdt { /// Constructs a new '`RegisterCrdtImpl`'. - pub(crate) fn new(address: RegisterAddress) -> Self { + pub fn new(address: RegisterAddress) -> Self { Self { address, data: MerkleReg::new(), @@ -49,23 +53,23 @@ impl RegisterCrdt { } /// Returns the address. - pub(crate) fn address(&self) -> &RegisterAddress { + pub fn address(&self) -> &RegisterAddress { &self.address } /// Merge another register into this one. 
- pub(crate) fn merge(&mut self, other: Self) { + pub fn merge(&mut self, other: Self) { self.data.merge(other.data); } /// Returns total number of items in the register. - pub(crate) fn size(&self) -> u64 { + pub fn size(&self) -> u64 { (self.data.num_nodes() + self.data.num_orphans()) as u64 } /// Write a new entry to the `RegisterCrdt`, returning the hash /// of the entry and the CRDT operation without a signature - pub(crate) fn write( + pub fn write( &mut self, entry: Entry, children: &BTreeSet, @@ -81,7 +85,7 @@ impl RegisterCrdt { } /// Apply a remote data CRDT operation to this replica of the `RegisterCrdtImpl`. - pub(crate) fn apply_op(&mut self, op: RegisterOp) -> Result<()> { + pub fn apply_op(&mut self, op: RegisterOp) -> Result<()> { // Let's first check the op is validly signed. // Note: Perms and valid sig for the op are checked at the upper Register layer. @@ -100,12 +104,12 @@ impl RegisterCrdt { } /// Get the entry corresponding to the provided `hash` if it exists. - pub(crate) fn get(&self, hash: EntryHash) -> Option<&Entry> { + pub fn get(&self, hash: EntryHash) -> Option<&Entry> { self.data.node(hash.0).map(|node| &node.value) } /// Read current entries (multiple entries occur on concurrent writes). - pub(crate) fn read(&self) -> BTreeSet<(EntryHash, Entry)> { + pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { self.data .read() .hashes_and_nodes() @@ -124,9 +128,84 @@ impl RegisterCrdt { /// Access the underlying MerkleReg (e.g. for access to history) /// NOTE: This API is unstable and may be removed in the future - pub(crate) fn merkle_reg(&self) -> &MerkleReg { + pub fn merkle_reg(&self) -> &MerkleReg { &self.data } + + /// Log the structure of the MerkleReg as a tree view. + /// This is actually being the `update history` of the register. 
+ pub fn log_update_history(&self) -> String { + let mut output = "MerkleReg Structure:\n".to_string(); + output = format!( + "{output}Total entries: {}\n", + self.data.num_nodes() + self.data.num_orphans() + ); + + // Find root nodes (entries with no parents) + let roots: Vec<_> = self.data.read().hashes().into_iter().collect(); + + // Print the tree starting from each root + for (i, root) in roots.iter().enumerate() { + let mut visited = HashSet::new(); + Self::print_tree( + root, + &self.data, + &mut output, + "", + i == roots.len() - 1, + &mut visited, + ); + } + + output + } + + // Helper function to recursively print the MerkleReg tree + fn print_tree( + hash: &CrdtHash, + merkle_reg: &MerkleReg, + output: &mut String, + prefix: &str, + is_last: bool, + visited: &mut HashSet, + ) { + let pretty_hash = format!("{}", XorName::from_content(hash)); + if !visited.insert(*hash) { + *output = format!( + "{}{prefix}{}* {pretty_hash} (cycle detected)\n", + output, + if is_last { "└── " } else { "├── " }, + ); + return; + } + + let entry = if let Some(node) = merkle_reg.node(*hash) { + format!("value: {}", XorName::from_content(&node.value)) + } else { + "value: None".to_string() + }; + *output = format!( + "{}{prefix}{}{pretty_hash}: {entry}\n", + output, + if is_last { "└── " } else { "├── " }, + ); + + let children: Vec<_> = merkle_reg.children(*hash).hashes().into_iter().collect(); + let new_prefix = format!("{prefix}{} ", if is_last { " " } else { "│" }); + + for (i, child) in children.iter().enumerate() { + Self::print_tree( + child, + merkle_reg, + output, + &new_prefix, + i == children.len() - 1, + visited, + ); + } + + visited.remove(hash); + } } #[cfg(test)] diff --git a/sn_registers/src/register.rs b/sn_registers/src/register.rs index 366f73ef0e..2bfda88aa3 100644 --- a/sn_registers/src/register.rs +++ b/sn_registers/src/register.rs @@ -6,15 +6,12 @@ // KIND, either express or implied. 
Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - error::Result, reg_crdt::RegisterCrdt, Entry, EntryHash, Error, Permissions, RegisterAddress, - RegisterOp, -}; - -use bls::{PublicKey, SecretKey, Signature}; -use crdts::merkle_reg::{Hash, MerkleReg}; +use crate::{error::Result, Error, Permissions, RegisterAddress, RegisterOp}; +#[cfg(feature = "test-utils")] +use bls::SecretKey; +use bls::{PublicKey, Signature}; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use xor_name::XorName; /// Arbitrary maximum size of a register entry. @@ -26,8 +23,8 @@ const MAX_REG_NUM_ENTRIES: u16 = 1024; /// A Register on the SAFE Network #[derive(Clone, Eq, PartialEq, PartialOrd, Hash, Serialize, Deserialize, Debug)] pub struct Register { - /// CRDT data of the Register - crdt: RegisterCrdt, + /// contains the info of meta (XorName) and owner (PublicKey) + address: RegisterAddress, /// Permissions of the Register /// Depending on the permissions, the owner can allow other users to write to the register /// Everyone can always read the Register because all data is public @@ -39,8 +36,8 @@ pub struct Register { #[derive(Clone, Debug, Serialize, Deserialize, PartialOrd, PartialEq, Eq, Hash)] pub struct SignedRegister { /// the base register we had at creation - base_register: Register, - /// signature over the above by the owner + register: Register, + /// signature over the above register by the owner signature: Signature, /// operations to apply on this register, /// they contain a signature of the writer @@ -49,24 +46,29 @@ pub struct SignedRegister { impl SignedRegister { /// Create a new SignedRegister - pub fn new(base_register: Register, signature: Signature) -> Self { + pub fn new(register: Register, signature: Signature, ops: BTreeSet) -> Self { Self { - base_register, + register, signature, - ops: 
BTreeSet::new(), + ops, } } /// Return the base register. This is the register before any operations have been applied. pub fn base_register(&self) -> &Register { - &self.base_register + &self.register } /// Verfies a SignedRegister pub fn verify(&self) -> Result<()> { - let bytes = self.base_register.bytes()?; + let reg_size = self.ops.len(); + if reg_size >= MAX_REG_NUM_ENTRIES as usize { + return Err(Error::TooManyEntries(reg_size)); + } + + let bytes = self.register.bytes()?; if !self - .base_register + .register .owner() .verify(&self.signature, bytes.as_slice()) { @@ -74,13 +76,20 @@ impl SignedRegister { } for op in &self.ops { - self.base_register.check_register_op(op)?; + self.register.check_register_op(op)?; + let size = op.crdt_op.value.len(); + if size > MAX_REG_ENTRY_SIZE { + return Err(Error::EntryTooBig { + size, + max: MAX_REG_ENTRY_SIZE, + }); + } } Ok(()) } pub fn verify_with_address(&self, address: RegisterAddress) -> Result<()> { - if self.base_register.address() != &address { + if self.register.address() != &address { return Err(Error::InvalidRegisterAddress { requested: Box::new(address), got: Box::new(*self.address()), @@ -89,19 +98,9 @@ impl SignedRegister { self.verify() } - /// Return the Register after applying all the operations - pub fn register(self) -> Result { - let mut register = self.base_register; - for op in self.ops { - register.apply_op(op)?; - } - Ok(register) - } - /// Merge two SignedRegisters pub fn merge(&mut self, other: &Self) -> Result<()> { - self.base_register - .verify_is_mergeable(&other.base_register)?; + self.register.verify_is_mergeable(&other.register)?; self.ops.extend(other.ops.clone()); Ok(()) } @@ -109,8 +108,7 @@ impl SignedRegister { /// Merge two SignedRegisters but verify the incoming content /// Significantly slower than merge, use when you want to trust but verify the `other` pub fn verified_merge(&mut self, other: &Self) -> Result<()> { - self.base_register - 
.verify_is_mergeable(&other.base_register)?; + self.register.verify_is_mergeable(&other.register)?; other.verify()?; self.ops.extend(other.ops.clone()); Ok(()) @@ -118,89 +116,80 @@ impl SignedRegister { /// Return the address. pub fn address(&self) -> &RegisterAddress { - self.base_register.address() + self.register.address() } /// Return the owner of the data. pub fn owner(&self) -> PublicKey { - self.base_register.owner() + self.register.owner() } /// Check and add an Op to the SignedRegister pub fn add_op(&mut self, op: RegisterOp) -> Result<()> { - self.base_register.check_register_op(&op)?; + let reg_size = self.ops.len(); + if reg_size >= MAX_REG_NUM_ENTRIES as usize { + return Err(Error::TooManyEntries(reg_size)); + } + + let size = op.crdt_op.value.len(); + if size > MAX_REG_ENTRY_SIZE { + return Err(Error::EntryTooBig { + size, + max: MAX_REG_ENTRY_SIZE, + }); + } + + self.register.check_register_op(&op)?; self.ops.insert(op); Ok(()) } - /// Access the underlying MerkleReg (e.g. for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.base_register.merkle_reg() + /// Returns the reference to the ops list + pub fn ops(&self) -> &BTreeSet { + &self.ops + } + + /// Used in tests. 
+ #[cfg(feature = "test-utils")] + pub fn test_new_from_address(address: RegisterAddress, owner: &SecretKey) -> Self { + let base_register = Register { + address, + permissions: Permissions::AnyoneCanWrite, + }; + let bytes = if let Ok(bytes) = base_register.bytes() { + bytes + } else { + panic!("Failed to serialize register {base_register:?}"); + }; + let signature = owner.sign(bytes); + Self::new(base_register, signature, BTreeSet::new()) } } impl Register { /// Create a new Register pub fn new(owner: PublicKey, meta: XorName, mut permissions: Permissions) -> Self { - let address = RegisterAddress { meta, owner }; permissions.add_writer(owner); Self { - crdt: RegisterCrdt::new(address), + address: RegisterAddress { meta, owner }, permissions, } } - /// Sign a Register and return the signature, makes sure the signer is the owner in the process - pub fn sign(&self, secret_key: &SecretKey) -> Result { - if self.owner() != secret_key.public_key() { - return Err(Error::InvalidSecretKey); - } - let bytes = self.bytes()?; - let signature = secret_key.sign(bytes); - Ok(signature) - } - /// Returns a bytes version of the Register used for signing /// Use this API when you want to sign a Register withtout providing a secret key to the Register API pub fn bytes(&self) -> Result> { rmp_serde::to_vec(self).map_err(|_| Error::SerialisationFailed) } - /// Sign a Register into a SignedRegister - pub fn into_signed(self, secret_key: &SecretKey) -> Result { - let signature = self.sign(secret_key)?; - Ok(SignedRegister::new(self, signature)) - } - /// Return the address. pub fn address(&self) -> &RegisterAddress { - self.crdt.address() + &self.address } /// Return the owner of the data. pub fn owner(&self) -> PublicKey { - self.address().owner() - } - - /// Return the number of items held in the register - pub fn size(&self) -> u64 { - self.crdt.size() - } - - /// Return a value corresponding to the provided 'hash', if present. 
- pub fn get(&self, hash: EntryHash) -> Result<&Entry> { - self.crdt.get(hash).ok_or(Error::NoSuchEntry(hash)) - } - - /// Read the last entry, or entries when there are branches, if the register is not empty. - pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { - self.crdt.read() - } - - /// Returns the children of an entry, along with their corresponding entry hashes - pub fn children(&self, hash: &EntryHash) -> BTreeSet<(EntryHash, Entry)> { - self.crdt.children(hash) + self.address.owner() } /// Return the permission. @@ -208,37 +197,6 @@ impl Register { &self.permissions } - /// Write an entry to the Register, returning the generated - /// CRDT operation so the caller can sign and broadcast it to other replicas, - /// along with the hash of the entry just written. - pub fn write( - &mut self, - entry: Entry, - children: &BTreeSet, - signer: &SecretKey, - ) -> Result<(EntryHash, RegisterOp)> { - self.check_entry_and_reg_sizes(&entry)?; - // check permissions before writing on the underlying CRDT - self.check_user_permissions(signer.public_key())?; - let (hash, address, crdt_op) = self.crdt.write(entry, children)?; - let op = RegisterOp::new(address, crdt_op, signer); - Ok((hash, op)) - } - - /// Apply a signed data CRDT operation. - pub fn apply_op(&mut self, op: RegisterOp) -> Result<()> { - self.check_entry_and_reg_sizes(&op.crdt_op.value)?; - self.check_register_op(&op)?; - self.crdt.apply_op(op) - } - - /// Merge another Register into this one. - pub fn merge(&mut self, other: &Self) -> Result<()> { - self.verify_is_mergeable(other)?; - self.crdt.merge(other.crdt.clone()); - Ok(()) - } - /// Check if a register op is valid for our current register pub fn check_register_op(&self, op: &RegisterOp) -> Result<()> { if self.permissions.can_anyone_write() { @@ -261,107 +219,6 @@ impl Register { } } - /// Access the underlying MerkleReg (e.g. 
for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.crdt.merkle_reg() - } - - /// Log the structure of the MerkleReg within this Register's CRDT as a tree view. - /// This is actually being the `update history` of the register. - pub fn log_update_history(&self) -> String { - let mut output = "MerkleReg Structure:\n".to_string(); - let merkle_reg = self.crdt.merkle_reg(); - output = format!( - "{output}Total entries: {}\n", - merkle_reg.num_nodes() + merkle_reg.num_orphans() - ); - - // Find root nodes (entries with no parents) - let roots: Vec<_> = merkle_reg.read().hashes().into_iter().collect(); - - // Print the tree starting from each root - for (i, root) in roots.iter().enumerate() { - let mut visited = HashSet::new(); - Self::print_tree( - root, - merkle_reg, - &mut output, - "", - i == roots.len() - 1, - &mut visited, - ); - } - - output - } - - // Helper function to recursively print the MerkleReg tree - fn print_tree( - hash: &Hash, - merkle_reg: &MerkleReg, - output: &mut String, - prefix: &str, - is_last: bool, - visited: &mut HashSet, - ) { - let pretty_hash = format!("{}", XorName::from_content(hash)); - if !visited.insert(*hash) { - *output = format!( - "{}{prefix}{}* {pretty_hash} (cycle detected)\n", - output, - if is_last { "└── " } else { "├── " }, - ); - return; - } - - let entry = if let Some(node) = merkle_reg.node(*hash) { - format!("value: {}", XorName::from_content(&node.value)) - } else { - "value: None".to_string() - }; - *output = format!( - "{}{prefix}{}{pretty_hash}: {entry}\n", - output, - if is_last { "└── " } else { "├── " }, - ); - - let children: Vec<_> = merkle_reg.children(*hash).hashes().into_iter().collect(); - let new_prefix = format!("{prefix}{} ", if is_last { " " } else { "│" }); - - for (i, child) in children.iter().enumerate() { - Self::print_tree( - child, - merkle_reg, - output, - &new_prefix, - i == children.len() - 1, - visited, - 
); - } - - visited.remove(hash); - } - - // Private helper to check the given Entry's size is within define limit, - // as well as check the Register hasn't already reached the maximum number of entries. - fn check_entry_and_reg_sizes(&self, entry: &Entry) -> Result<()> { - let size = entry.len(); - if size > MAX_REG_ENTRY_SIZE { - return Err(Error::EntryTooBig { - size, - max: MAX_REG_ENTRY_SIZE, - }); - } - - let reg_size = self.crdt.size(); - if reg_size >= MAX_REG_NUM_ENTRIES.into() { - return Err(Error::TooManyEntries(reg_size as usize)); - } - - Ok(()) - } - // Private helper to check if this Register is mergeable with another fn verify_is_mergeable(&self, other: &Self) -> Result<()> { if self.address() != other.address() || self.permissions != other.permissions { @@ -369,30 +226,17 @@ impl Register { } Ok(()) } - - /// Used in tests. - #[cfg(feature = "test-utils")] - pub fn test_new_from_address(address: RegisterAddress) -> Self { - Register { - crdt: RegisterCrdt::new(address), - permissions: Permissions::AnyoneCanWrite, - } - } } #[cfg(test)] mod tests { - use crate::RegisterOp; + use crate::{RegisterCrdt, RegisterOp}; - use super::{ - EntryHash, Error, Permissions, Register, RegisterAddress, Result, MAX_REG_NUM_ENTRIES, - }; + use super::*; use bls::SecretKey; - use eyre::Context; - use proptest::prelude::*; - use rand::{rngs::OsRng, seq::SliceRandom, thread_rng, Rng}; - use std::{collections::BTreeSet, sync::Arc}; + use rand::{thread_rng, Rng}; + use std::collections::BTreeSet; use xor_name::XorName; #[test] @@ -408,111 +252,61 @@ mod tests { assert_eq!(*register.address(), address); } - #[test] - fn register_generate_entry_hash() -> eyre::Result<()> { - let authority_sk = SecretKey::random(); - let authority = authority_sk.public_key(); - - let meta: XorName = xor_name::rand::random(); - - let mut replica1 = Register::new(authority, meta, Permissions::default()); - let mut replica2 = Register::new(authority, meta, Permissions::default()); - - // 
Different item from same replica's root shall having different entry_hash - let item1 = random_register_entry(); - let item2 = random_register_entry(); - let (entry_hash1_1, _) = replica1.write(item1.clone(), &BTreeSet::new(), &authority_sk)?; - let (entry_hash1_2, _) = replica1.write(item2, &BTreeSet::new(), &authority_sk)?; - assert!(entry_hash1_1 != entry_hash1_2); - - // Same item from different replica's root shall remain same - let (entry_hash2_1, _) = replica2.write(item1, &BTreeSet::new(), &authority_sk)?; - assert_eq!(entry_hash1_1, entry_hash2_1); - - let mut parents = BTreeSet::new(); - // Different item from different replica with same parents shall be different - let _ = parents.insert(entry_hash1_1); - let item3 = random_register_entry(); - let item4 = random_register_entry(); - let (entry_hash1_1_3, _) = replica1.write(item3, &parents, &authority_sk)?; - let (entry_hash2_1_4, _) = replica2.write(item4, &parents, &authority_sk)?; - assert!(entry_hash1_1_3 != entry_hash2_1_4); - - Ok(()) - } - #[test] fn register_permissions() -> eyre::Result<()> { let owner_sk = SecretKey::random(); let owner = owner_sk.public_key(); - let other_user_sk = SecretKey::random(); - let other_user = other_user_sk.public_key(); + let user_sk_1 = SecretKey::random(); + let other_user = user_sk_1.public_key(); + let user_sk_2 = SecretKey::random(); let meta: XorName = xor_name::rand::random(); - let item = random_register_entry(); + let address = RegisterAddress { meta, owner }; // Create replicas where anyone can write to them, including the owner ofc - let mut replica1 = Register::new(owner, meta, Permissions::new_anyone_can_write()); - let mut replica2 = replica1.clone(); - let mut signed_replica3 = replica1.clone().into_signed(&owner_sk)?; - // ...owner and the other user can both write to them - let (_, op1) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - let (_, op2) = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - 
replica2.apply_op(op1)?; - replica2.apply_op(op2)?; - signed_replica3.verified_merge(&replica2.into_signed(&owner_sk)?)?; + let mut signed_reg_1 = create_reg_replica_with( + meta, + Some(owner_sk.clone()), + Some(Permissions::new_anyone_can_write()), + ); + // ...owner and any other users can both write to them + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_1.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_1)?; + assert!(signed_reg_1.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_2)?; + assert!(signed_reg_1.add_op(op).is_ok()); // Create replicas allowing both the owner and other user to write to them - let mut replica1 = Register::new(owner, meta, Permissions::new_with([other_user])); - let mut replica2 = replica1.clone(); - let mut signed_replica3 = replica1.clone().into_signed(&owner_sk)?; - // ...owner and the other user can both write to them - let (_, op1) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - let (_, op2) = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - replica2.apply_op(op1)?; - replica2.apply_op(op2)?; - signed_replica3.verified_merge(&replica2.into_signed(&owner_sk)?)?; + let mut signed_reg_2 = create_reg_replica_with( + meta, + Some(owner_sk.clone()), + Some(Permissions::new_with([other_user])), + ); + // ...owner and the other user can both write to them, others shall fail + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_2.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_1)?; + assert!(signed_reg_2.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_2)?; + assert!(signed_reg_2.add_op(op).is_err()); // Create replicas with the owner as the only allowed to write - let mut replica1 = Register::new(owner, meta, Permissions::default()); - let mut replica2 = replica1.clone(); + let mut signed_reg_3 = create_reg_replica_with(meta, Some(owner_sk.clone()), None); // ...owner can write to 
them - let (_, op) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - replica2.apply_op(op.clone())?; + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_3.add_op(op).is_ok()); // ...whilst other user cannot write to them - let res = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk); + let op = generate_random_op(address, &user_sk_1)?; + let res = signed_reg_3.add_op(op); assert!( matches!(&res, Err(err) if err == &Error::AccessDenied(other_user)), "Unexpected result: {res:?}" ); - let (_, address, crdt_op) = replica1.crdt.write(item.clone(), &BTreeSet::new())?; - let op_signed_by_other_user = RegisterOp::new(address, crdt_op, &other_user_sk); - let res = replica2.apply_op(op_signed_by_other_user); - assert!( - matches!(&res, Err(err) if err == &Error::AccessDenied(other_user)), - "Unexpected result: {res:?}" - ); - - // Create Registers with different permissions to write - let mut reg1 = Register::new(owner, meta, Permissions::default()); - let mut reg2 = Register::new(owner, meta, Permissions::new_with([other_user])); - // ...owner can write to both of them, the other user only to one of them - reg1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - reg2.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - reg2.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - // ...but they cannot be merged due to different permissions sets - let res1 = reg1.merge(®2); - let res2 = reg2.merge(®1); - assert!( - matches!(&res1, Err(err) if err == &Error::DifferentBaseRegister), - "Unexpected result: {res1:?}" - ); - assert_eq!(res1, res2); - let mut signed_reg1 = reg1.into_signed(&owner_sk)?; - let mut signed_reg2 = reg2.into_signed(&owner_sk)?; - let res1 = signed_reg1.verified_merge(&signed_reg2); - let res2 = signed_reg2.verified_merge(&signed_reg1); + // Registers with different permission can not be merged + let res1 = signed_reg_1.merge(&signed_reg_2); + let res2 = signed_reg_2.merge(&signed_reg_1); assert!( 
matches!(&res1, Err(err) if err == &Error::DifferentBaseRegister), "Unexpected result: {res1:?}" @@ -522,85 +316,6 @@ mod tests { Ok(()) } - #[test] - fn register_concurrent_write_ops() -> eyre::Result<()> { - let authority_sk1 = SecretKey::random(); - let authority1 = authority_sk1.public_key(); - let authority_sk2 = SecretKey::random(); - let authority2 = authority_sk2.public_key(); - - let meta: XorName = xor_name::rand::random(); - - // We'll have 'authority1' as the owner in both replicas and - // grant permissions for Write to 'authority2' in both replicas too - let perms = Permissions::new_with([authority1, authority2]); - - // Instantiate the same Register on two replicas - let mut replica1 = Register::new(authority_sk1.public_key(), meta, perms); - let mut replica2 = replica1.clone(); - - // And let's write an item to replica1 with autority1 - let item1 = random_register_entry(); - let (_, op1) = replica1.write(item1, &BTreeSet::new(), &authority_sk1)?; - - // Let's assert current state on both replicas - assert_eq!(replica1.size(), 1); - assert_eq!(replica2.size(), 0); - - // Concurrently write another item with authority2 on replica2 - let item2 = random_register_entry(); - let (_, op2) = replica2.write(item2, &BTreeSet::new(), &authority_sk2)?; - - // Item should be writed on replica2 - assert_eq!(replica2.size(), 1); - - // Write operations are now broadcasted and applied to both replicas - replica1.apply_op(op2)?; - replica2.apply_op(op1)?; - - // Let's assert data convergence on both replicas - verify_data_convergence(&[replica1, replica2], 2)?; - - Ok(()) - } - - #[test] - fn register_get_by_hash() -> eyre::Result<()> { - let (sk, register) = &mut create_reg_replicas(1)[0]; - - let entry1 = random_register_entry(); - let entry2 = random_register_entry(); - let entry3 = random_register_entry(); - - let (entry1_hash, _) = register.write(entry1.clone(), &BTreeSet::new(), sk)?; - - // this creates a fork since entry1 is not set as child of entry2 - let 
(entry2_hash, _) = register.write(entry2.clone(), &BTreeSet::new(), sk)?; - - // we'll write entry2 but having the entry1 and entry2 as children, - // i.e. solving the fork created by them - let children = [entry1_hash, entry2_hash].into_iter().collect(); - - let (entry3_hash, _) = register.write(entry3.clone(), &children, sk)?; - - assert_eq!(register.size(), 3); - - let first_entry = register.get(entry1_hash)?; - assert_eq!(first_entry, &entry1); - - let second_entry = register.get(entry2_hash)?; - assert_eq!(second_entry, &entry2); - - let third_entry = register.get(entry3_hash)?; - assert_eq!(third_entry, &entry3); - - let non_existing_hash = EntryHash::default(); - let entry_not_found = register.get(non_existing_hash); - assert_eq!(entry_not_found, Err(Error::NoSuchEntry(non_existing_hash))); - - Ok(()) - } - #[test] fn register_query_public_perms() -> eyre::Result<()> { let meta = xor_name::rand::random(); @@ -627,21 +342,27 @@ mod tests { // check register 1 is public assert_eq!(replica1.owner(), authority_pk1); - assert_eq!(replica1.check_user_permissions(owner1), Ok(())); - assert_eq!(replica1.check_user_permissions(owner2), Ok(())); - assert_eq!(replica1.check_user_permissions(random_user), Ok(())); - assert_eq!(replica1.check_user_permissions(random_user2), Ok(())); + assert_eq!(replica1.register.check_user_permissions(owner1), Ok(())); + assert_eq!(replica1.register.check_user_permissions(owner2), Ok(())); + assert_eq!( + replica1.register.check_user_permissions(random_user), + Ok(()) + ); + assert_eq!( + replica1.register.check_user_permissions(random_user2), + Ok(()) + ); // check register 2 has only owner1 and owner2 write allowed assert_eq!(replica2.owner(), authority_pk2); - assert_eq!(replica2.check_user_permissions(owner1), Ok(())); - assert_eq!(replica2.check_user_permissions(owner2), Ok(())); + assert_eq!(replica2.register.check_user_permissions(owner1), Ok(())); + assert_eq!(replica2.register.check_user_permissions(owner2), Ok(())); 
assert_eq!( - replica2.check_user_permissions(random_user), + replica2.register.check_user_permissions(random_user), Err(Error::AccessDenied(random_user)) ); assert_eq!( - replica2.check_user_permissions(random_user2), + replica2.register.check_user_permissions(random_user2), Err(Error::AccessDenied(random_user2)) ); @@ -654,25 +375,20 @@ mod tests { // one replica will allow write ops to anyone let authority_sk1 = SecretKey::random(); + let owner = authority_sk1.public_key(); let perms1 = Permissions::new_anyone_can_write(); + let address = RegisterAddress { meta, owner }; - let mut replica = create_reg_replica_with(meta, Some(authority_sk1), Some(perms1)); + let mut replica = create_reg_replica_with(meta, Some(authority_sk1.clone()), Some(perms1)); for _ in 0..MAX_REG_NUM_ENTRIES { - let (_hash, _op) = replica - .write( - random_register_entry(), - &BTreeSet::new(), - &SecretKey::random(), - ) - .context("Failed to write register entry")?; + let op = generate_random_op(address, &authority_sk1)?; + assert!(replica.add_op(op).is_ok()); } - let excess_entry = replica.write( - random_register_entry(), - &BTreeSet::new(), - &SecretKey::random(), - ); + let op = generate_random_op(address, &authority_sk1)?; + + let excess_entry = replica.add_op(op); match excess_entry { Err(Error::TooManyEntries(size)) => { @@ -693,14 +409,18 @@ mod tests { meta: XorName, perms: Option, count: usize, - ) -> Vec<(SecretKey, Register)> { - let replicas: Vec<(SecretKey, Register)> = (0..count) + ) -> Vec<(SecretKey, SignedRegister)> { + let replicas: Vec<(SecretKey, SignedRegister)> = (0..count) .map(|_| { let authority_sk = authority_sk.clone().unwrap_or_else(SecretKey::random); let authority = authority_sk.public_key(); let perms = perms.clone().unwrap_or_default(); let register = Register::new(authority, meta, perms); - (authority_sk, register) + + let signature = authority_sk.sign(register.bytes().unwrap()); + let signed_reg = SignedRegister::new(register, signature, 
Default::default()); + + (authority_sk, signed_reg) }) .collect(); @@ -708,424 +428,24 @@ mod tests { replicas } - fn create_reg_replicas(count: usize) -> Vec<(SecretKey, Register)> { - let meta = xor_name::rand::random(); - - gen_reg_replicas(None, meta, None, count) - } - fn create_reg_replica_with( meta: XorName, authority_sk: Option, perms: Option, - ) -> Register { + ) -> SignedRegister { let replicas = gen_reg_replicas(authority_sk, meta, perms, 1); replicas[0].1.clone() } - // verify data convergence on a set of replicas and with the expected length - fn verify_data_convergence(replicas: &[Register], expected_size: u64) -> Result<()> { - // verify all replicas have the same and expected size - for r in replicas { - assert_eq!(r.size(), expected_size); - } - - // now verify that the items are the same in all replicas - let r0 = &replicas[0]; - for r in replicas { - assert_eq!(r.crdt, r0.crdt); - } - - Ok(()) - } - - // Generate a vec of Register replicas of some length, with corresponding vec of keypairs for signing, and the overall owner of the register - fn generate_replicas( - max_quantity: usize, - ) -> impl Strategy, Arc)>> { - let xorname = xor_name::rand::random(); - - let owner_sk = Arc::new(SecretKey::random()); - let owner = owner_sk.public_key(); - let perms = Permissions::new_anyone_can_write(); - - (1..max_quantity + 1).prop_map(move |quantity| { - let mut replicas = Vec::with_capacity(quantity); - for _ in 0..quantity { - let replica = Register::new(owner, xorname, perms.clone()); - - replicas.push(replica); - } - - Ok((replicas, Arc::clone(&owner_sk))) - }) - } - - // Generate a Register entry - fn generate_reg_entry() -> impl Strategy> { - "\\PC*".prop_map(|s| s.into_bytes()) - } - - // Generate a vec of Register entries - fn generate_dataset(max_quantity: usize) -> impl Strategy>> { - prop::collection::vec(generate_reg_entry(), 1..max_quantity + 1) - } - - // Generates a vec of Register entries each with a value suggesting - // the delivery 
chance of the op that gets created with the entry - fn generate_dataset_and_probability( - max_quantity: usize, - ) -> impl Strategy, u8)>> { - prop::collection::vec((generate_reg_entry(), any::()), 1..max_quantity + 1) - } - - proptest! { - #[test] - fn proptest_reg_doesnt_crash_with_random_data( - _data in generate_reg_entry() - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - // Write an item on replicas - let (_, op) = replica1.write(random_register_entry(), &BTreeSet::new(), &owner_sk)?; - replica2.apply_op(op)?; - - verify_data_convergence(&[replica1, replica2], 1)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data( - dataset in generate_dataset(1000) - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut children = BTreeSet::new(); - for _data in dataset { - // Write an item on replica1 - let (hash, op) = replica1.write(random_register_entry(), &children, &owner_sk)?; - // now apply that op to replica 2 - replica2.apply_op(op)?; - children = vec![hash].into_iter().collect(); - } - - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data_random_entry_children( - dataset in generate_dataset(1000) - ) { - // Instantiate the same 
Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut list_of_hashes = Vec::new(); - let mut rng = thread_rng(); - for _data in dataset { - // choose a random set of children - let num_of_children: usize = rng.gen(); - let children = list_of_hashes.choose_multiple(&mut OsRng, num_of_children).cloned().collect(); - - // Write an item on replica1 using the randomly generated set of children - let (hash, op) = replica1.write(random_register_entry(), &children, &owner_sk)?; - - // now apply that op to replica 2 - replica2.apply_op(op)?; - list_of_hashes.push(hash); - } - - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data_across_arbitrary_number_of_replicas( - dataset in generate_dataset(500), - res in generate_replicas(50) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut children = BTreeSet::new(); - for _data in dataset { - // first generate an op from one replica... 
- let (hash, op)= replicas[0].write(random_register_entry(), &children, &owner_sk)?; - - // then apply this to all replicas - for replica in &mut replicas { - replica.apply_op(op.clone())?; - } - children = vec![hash].into_iter().collect(); - } - - verify_data_convergence(&replicas, dataset_length)?; - - } - - #[test] - fn proptest_converge_with_shuffled_op_set_across_arbitrary_number_of_replicas( - dataset in generate_dataset(100), - res in generate_replicas(500) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set from one replica - let mut ops = vec![]; - - let mut children = BTreeSet::new(); - for _data in dataset { - let (hash, op) = replicas[0].write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for op in ops { - replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_from_many_replicas_across_arbitrary_number_of_replicas( - dataset in generate_dataset(1000), - res in generate_replicas(7) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for _data in dataset { - if let Some(replica) = replicas.choose_mut(&mut OsRng) - { - let (hash, op) = replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - } - - let opslen = ops.len() as u64; - prop_assert_eq!(dataset_length, opslen); - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for op in ops { - 
replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_dropped_data_can_be_reapplied_and_we_converge( - dataset in generate_dataset_and_probability(1000), - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for (_data, delivery_chance) in dataset { - let (hash, op)= replica1.write(random_register_entry(), &children, &owner_sk)?; - - ops.push((op, delivery_chance)); - children = vec![hash].into_iter().collect(); - } - - for (op, delivery_chance) in ops.clone() { - if delivery_chance < u8::MAX / 3 { - replica2.apply_op(op)?; - } - } - - // here we statistically should have dropped some messages - if dataset_length > 50 { - assert_ne!(replica2.size(), replica1.size()); - } - - // reapply all ops - for (op, _) in ops { - replica2.apply_op(op)?; - } - - // now we converge - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_from_many_while_dropping_some_at_random( - dataset in generate_dataset_and_probability(1000), - res in generate_replicas(7), - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for (_data, delivery_chance) in dataset { - // a random index within the replicas range - let index: usize = OsRng.gen_range(0..replicas.len()); - let replica = &mut replicas[index]; - - let (hash, 
op)=replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push((op, delivery_chance)); - children = vec![hash].into_iter().collect(); - } - - let opslen = ops.len() as u64; - prop_assert_eq!(dataset_length, opslen); - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for (op, delivery_chance) in ops.clone() { - if delivery_chance > u8::MAX / 3 { - replica.apply_op(op)?; - } - } - - // reapply all ops, simulating lazy messaging filling in the gaps - for (op, _) in ops { - replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_including_bad_ops_which_error_and_are_not_applied( - dataset in generate_dataset(10), - bogus_dataset in generate_dataset(10), // should be same number as dataset - gen_replicas_result in generate_replicas(10), - - ) { - let (mut replicas, owner_sk) = gen_replicas_result?; - let dataset_length = dataset.len(); - let bogus_dataset_length = bogus_dataset.len(); - let number_replicas = replicas.len(); - - // generate the real ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for _data in dataset { - if let Some(replica) = replicas.choose_mut(&mut OsRng) - { - let (hash, op)=replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - } - - // set up a replica that has nothing to do with the rest, random xor... different owner... 
- let xorname = xor_name::rand::random(); - let random_owner_sk = SecretKey::random(); - let mut bogus_replica = Register::new(random_owner_sk.public_key(), xorname, Permissions::default()); - - // add bogus ops from bogus replica + bogus data - let mut children = BTreeSet::new(); - for _data in bogus_dataset { - let (hash, bogus_op) = bogus_replica.write(random_register_entry(), &children, &random_owner_sk)?; - bogus_replica.apply_op(bogus_op.clone())?; - ops.push(bogus_op); - children = vec![hash].into_iter().collect(); - } - - let opslen = ops.len(); - prop_assert_eq!(dataset_length + bogus_dataset_length, opslen); - - let mut err_count = vec![]; - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for op in ops { - match replica.apply_op(op) { - Ok(_) => {}, - // record all errors to check this matches bogus data - Err(error) => {err_count.push(error)}, - } - } - } - - // check we get an error per bogus datum per replica - assert_eq!(err_count.len(), bogus_dataset_length * number_replicas); - - verify_data_convergence(&replicas, dataset_length as u64)?; - } - } - fn random_register_entry() -> Vec { let random_bytes = thread_rng().gen::<[u8; 32]>(); random_bytes.to_vec() } + + fn generate_random_op(address: RegisterAddress, writer_sk: &SecretKey) -> Result { + let mut crdt_reg = RegisterCrdt::new(address); + let item = random_register_entry(); + let (_hash, addr, crdt_op) = crdt_reg.write(item, &BTreeSet::new())?; + Ok(RegisterOp::new(addr, crdt_op, writer_sk)) + } } diff --git a/sn_registers/src/register_op.rs b/sn_registers/src/register_op.rs index 936529cdf1..455d26b43d 100644 --- a/sn_registers/src/register_op.rs +++ b/sn_registers/src/register_op.rs @@ -39,7 +39,7 @@ impl std::hash::Hash for RegisterOp { impl RegisterOp { /// Create a new RegisterOp - pub(crate) fn new( + pub fn new( address: RegisterAddress, crdt_op: MerkleDagEntry, signer: &SecretKey, diff 
--git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 1589ad0997..b0f60bc453 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,23 +7,23 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.14" +version = "0.4.0" [dependencies] async-trait = "0.1" dirs-next = "2.0.0" -libp2p = { version = "0.53", features = ["kad"] } +libp2p = { version = "0.54.1", features = ["kad"] } libp2p-identity = { version = "0.2.7", features = ["rand"] } prost = { version = "0.9" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index 107b01d635..c9d853a009 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -11,9 +11,9 @@ use async_trait::async_trait; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use service_manager::{ServiceInstallCtx, ServiceLabel}; +use sn_evm::{AttoTokens, EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{Ipv4Addr, SocketAddr}, @@ -126,6 +126,20 @@ impl<'a> ServiceStateActions for NodeService<'a> { args.push(OsString::from(peers_str)); } + 
args.push(OsString::from(self.service_data.evm_network.to_string())); + if let EvmNetwork::Custom(custom_network) = &self.service_data.evm_network { + args.push(OsString::from("--rpc-url")); + args.push(OsString::from(custom_network.rpc_url_http.to_string())); + args.push(OsString::from("--payment-token-address")); + args.push(OsString::from( + custom_network.payment_token_address.to_string(), + )); + args.push(OsString::from("--data-payments-address")); + args.push(OsString::from( + custom_network.data_payments_address.to_string(), + )); + } + Ok(ServiceInstallCtx { args, autostart: options.auto_restart, @@ -269,6 +283,8 @@ pub struct NodeServiceData { )] pub connected_peers: Option>, pub data_dir_path: PathBuf, + #[serde(default)] + pub evm_network: EvmNetwork, pub genesis: bool, pub home_network: bool, pub listen_addr: Option>, @@ -292,7 +308,9 @@ pub struct NodeServiceData { )] pub peer_id: Option, pub pid: Option, - pub reward_balance: Option, + #[serde(default)] + pub rewards_address: RewardsAddress, + pub reward_balance: Option, pub rpc_socket_addr: SocketAddr, pub safenode_path: PathBuf, pub service_name: String, diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 4027c8c302..cbd6206fba 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.3" +version = "0.20.0" [features] reward-forward = [] @@ -21,7 +21,7 @@ custom_debug = "~0.6.1" dirs-next = "~2.0.0" hex = "~0.4.3" lazy_static = "~1.4.0" -libp2p = { version = "0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = { version = "~0.8.5", features = ["small_rng"] } rmp-serde = "1.1.1" secrecy = "0.8.0" diff --git a/sn_transfers/src/wallet/data_payments.rs b/sn_transfers/src/wallet/data_payments.rs index 90e05e179c..b200ff4c97 100644 --- 
a/sn_transfers/src/wallet/data_payments.rs +++ b/sn_transfers/src/wallet/data_payments.rs @@ -106,6 +106,10 @@ pub struct PaymentQuote { pub timestamp: SystemTime, /// quoting metrics being used to generate this quote pub quoting_metrics: QuotingMetrics, + /// list of bad_nodes that client shall not pick as a payee + /// in `serialised` format to avoid a cyclic dependency on sn_protocol + #[debug(skip)] + pub bad_nodes: Vec<u8>, /// node's public key that can verify the signature #[debug(skip)] pub pub_key: Vec<u8>, @@ -121,6 +125,7 @@ impl PaymentQuote { cost: NanoTokens::zero(), timestamp: SystemTime::now(), quoting_metrics: Default::default(), + bad_nodes: vec![], pub_key: vec![], signature: vec![], } @@ -132,6 +137,7 @@ impl PaymentQuote { cost: NanoTokens, timestamp: SystemTime, quoting_metrics: &QuotingMetrics, + serialised_bad_nodes: &[u8], ) -> Vec<u8> { let mut bytes = xorname.to_vec(); bytes.extend_from_slice(&cost.to_bytes()); @@ -144,6 +150,7 @@ impl PaymentQuote { ); let serialised_quoting_metrics = rmp_serde::to_vec(quoting_metrics).unwrap_or_default(); bytes.extend_from_slice(&serialised_quoting_metrics); + bytes.extend_from_slice(serialised_bad_nodes); bytes } @@ -168,6 +175,7 @@ impl PaymentQuote { self.cost, self.timestamp, &self.quoting_metrics, + &self.bad_nodes, ); if !pub_key.verify(&bytes, &self.signature) { @@ -196,6 +204,7 @@ impl PaymentQuote { cost, timestamp: SystemTime::now(), quoting_metrics: Default::default(), + bad_nodes: vec![], pub_key: vec![], signature: vec![], } @@ -294,6 +303,7 @@ mod tests { quote.cost, quote.timestamp, &quote.quoting_metrics, + &[], ); let signature = if let Ok(sig) = keypair.sign(&bytes) { sig diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index eae9c286f1..48955e7e8c 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,11 +7,18 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.7" +version = "0.4.8" + +[features] 
+local = ["sn_peers_acquisition/local"] [dependencies] +bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -libp2p = { version="0.53", features = ["identify", "kad"] } -serde = { version = "1.0.133", features = [ "derive"]} +evmlib = { path = "../evmlib", version = "0.1.1" } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } +rand = "0.8.5" +serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs new file mode 100644 index 0000000000..05eb710bde --- /dev/null +++ b/test_utils/src/evm.rs @@ -0,0 +1,39 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use color_eyre::{ + eyre::{bail, Context}, + Result, +}; +use evmlib::{utils::get_evm_network_from_env, wallet::Wallet, Network}; +use std::env; + +pub fn get_funded_wallet() -> evmlib::wallet::Wallet { + let network = + get_evm_network_from_env().expect("Failed to get EVM network from environment variables"); + if matches!(network, Network::ArbitrumOne) { + panic!("You're trying to use ArbitrumOne network. Use a custom network for testing."); + } + // Default deployer wallet of the testnet. 
+ const DEFAULT_WALLET_PRIVATE_KEY: &str = + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + + let private_key = env::var("SECRET_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); + + Wallet::new_from_private_key(network, &private_key).expect("Invalid private key") +} + +pub fn get_new_wallet() -> Result { + let network = get_evm_network_from_env() + .wrap_err("Failed to get EVM network from environment variables")?; + if matches!(network, Network::ArbitrumOne) { + bail!("You're trying to use ArbitrumOne network. Use a custom network for testing."); + } + + Ok(Wallet::new_with_random_wallet(network)) +} diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index 3466e43bc4..3151878ade 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -6,4 +6,32 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +pub mod evm; pub mod testnet; + +use bytes::Bytes; +use color_eyre::eyre::Result; +use libp2p::Multiaddr; +use rand::Rng; +use sn_peers_acquisition::parse_peer_addr; + +/// Generate random data of the given length. +pub fn gen_random_data(len: usize) -> Bytes { + let mut data = vec![0u8; len]; + rand::thread_rng().fill(&mut data[..]); + Bytes::from(data) +} + +/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. +/// +/// An empty `Vec` will be returned if the env var is not set or if local discovery is enabled. 
+pub fn peers_from_env() -> Result> { + let bootstrap_peers = if cfg!(feature = "local") { + Ok(vec![]) + } else if let Ok(peers_str) = std::env::var("SAFE_PEERS") { + peers_str.split(',').map(parse_peer_addr).collect() + } else { + Ok(vec![]) + }?; + Ok(bootstrap_peers) +} diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index b040667397..22cdd87d1c 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.54" +version = "0.1.55" [dependencies]