From 33cb6e8cc418be950173bd27e310cecb29a821cd Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 15 Oct 2024 14:57:32 +0200 Subject: [PATCH 001/128] feat(autonomi): archive API in wasm --- autonomi/index.html | 26 +++++++++----- autonomi/src/client/wasm.rs | 69 ++++++++++++++++++++++++++----------- 2 files changed, 67 insertions(+), 28 deletions(-) diff --git a/autonomi/index.html b/autonomi/index.html index bd806016ca..22d1fca468 100644 --- a/autonomi/index.html +++ b/autonomi/index.html @@ -5,7 +5,7 @@ + + + + + + + + \ No newline at end of file diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js new file mode 100644 index 0000000000..8720eaac54 --- /dev/null +++ b/autonomi/tests-js/index.js @@ -0,0 +1,64 @@ +import init, * as atnm from '../pkg/autonomi.js'; +import { assert } from './node_modules/chai/chai.js'; + +function randomData(len) { + const array = new Uint8Array(len); + window.crypto.getRandomValues(array); + return array; +} + +describe('autonomi', function () { + this.timeout(180 * 1000); + + let client; + let wallet; + before(async () => { + await init(); + atnm.logInit("sn_networking=warn,autonomi=trace"); + client = await atnm.Client.connect([window.peer_addr]); + wallet = atnm.getFundedWallet(); + }); + + it('calculates cost', async () => { + const data = randomData(32); + const cost = await client.dataCost(data); + + assert.typeOf(Number.parseFloat(cost.toString()), 'number'); + }); + + it('puts data (32 bytes)', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + + assert.typeOf(addr, 'string'); + }); + + it('puts data and gets it (32 bytes)', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + const fetchedData = await client.dataGet(addr); + + assert.deepEqual(Array.from(data), Array.from(fetchedData)); + }); + + it('puts data, creates archive and retrieves it', async () => { + const data = randomData(32); + const addr = await client.dataPut(data, wallet); + const archive = new Map([["foo", addr]]); + const archiveAddr = await client.archivePut(archive, wallet); + + const archiveFetched = await client.archiveGet(archiveAddr); + + assert.deepEqual(archive, archiveFetched); + }); + + it('writes bytes to vault and fetches it', async () => { + const data = randomData(32); + const secretKey = randomData(32); + + await client.writeBytesToVault(data, wallet, secretKey); + const dataFetched = await client.fetchAndDecryptVault(secretKey); + + assert.deepEqual(data, dataFetched); + }); +}); diff --git a/autonomi/tests-js/package-lock.json b/autonomi/tests-js/package-lock.json new file mode 100644 index 0000000000..61daae0de2 --- /dev/null +++ b/autonomi/tests-js/package-lock.json @@ -0,0 +1,1481 @@ +{ + "name": "tests-js", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "devDependencies": { + "chai": "^5.1.1", + "http-server": "^14.1.1", + "mocha": "^10.7.3" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/basic-auth": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", + "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "5.1.2" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/basic-auth/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true, + "license": "ISC" + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chai": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.1.tgz", + "integrity": "sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/corser": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz", + "integrity": "sha512-utCYNzRSQIZNPIcGZdQc92UVJYAhtGAteCFg0yRaFm8f0P+CPtyGyHXJcGXnffjCybUCEx3FQ2G7U3/o9eIkVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": 
"sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": 
"sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/glob": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-server": { + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz", + "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-auth": "^2.0.1", + "chalk": "^4.1.2", + "corser": "^2.0.1", + "he": "^1.2.0", + 
"html-encoding-sniffer": "^3.0.0", + "http-proxy": "^1.18.1", + "mime": "^1.6.0", + "minimist": "^1.2.6", + "opener": "^1.5.1", + "portfinder": "^1.0.28", + "secure-compare": "3.0.1", + "union": "~0.5.0", + "url-join": "^4.0.1" + }, + "bin": { + "http-server": "bin/http-server" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": 
"sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/loupe": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz", + "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==", + "dev": true, + "license": "MIT" + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true, + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": 
"https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mocha": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.7.3.tgz", + "integrity": "sha512-uQWxAu44wwiACGqjbPYmjo7Lg8sFrS3dQe7PP2FQI+woptP4vZXSMcfMyFL/e1yFEeEpV4RtyTpZROOKmxis+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^8.1.0", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^16.2.0", + "yargs-parser": "^20.2.9", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "dev": true, + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathval": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", + "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/portfinder": { + "version": "1.0.32", + "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz", + "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "async": "^2.6.4", + "debug": "^3.2.7", + "mkdirp": "^0.5.6" + }, + "engines": { + "node": ">= 0.12.0" + } + }, + "node_modules/portfinder/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/secure-compare": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz", + "integrity": "sha512-AckIIV90rPDcBcglUwXPF3kg0P0qmPsPXAj6BBEENQE1p5yA1xfmDJzfi1Tappj37Pv2mVbKpL3Z1T+Nn7k1Qw==", + "dev": true, + "license": "MIT" + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": 
"^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/union": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz", + "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==", + "dev": true, + "dependencies": { + "qs": "^6.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/url-join": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", + "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "dev": true, + "license": "MIT" + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/workerpool": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/autonomi/tests-js/package.json b/autonomi/tests-js/package.json new file mode 100644 index 0000000000..b33e6d0e30 --- /dev/null +++ b/autonomi/tests-js/package.json @@ -0,0 +1,11 @@ +{ + "type": "module", + "scripts": { + "serve": "http-server -a 127.0.0.1 ../" + }, + "devDependencies": { + "chai": "^5.1.1", + "http-server": "^14.1.1", + "mocha": "^10.7.3" + } +} \ No newline at end of file From bca9503ceced5707383b62749fba529af9dc1263 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 16 Oct 2024 16:28:00 +0200 Subject: [PATCH 022/128] fix(autonomi): gen secret key in the rigth way --- autonomi/index.html | 129 ++++++++++++++++++------------------ autonomi/src/client/wasm.rs | 6 ++ autonomi/tests-js/index.js | 2 +- 3 files changed, 73 insertions(+), 64 deletions(-) diff --git a/autonomi/index.html b/autonomi/index.html index 6f4d8182ad..dabb773cd2 100644 --- a/autonomi/index.html +++ b/autonomi/index.html @@ -1,67 +1,70 @@ - - - - - - + // Generate random secret key + const secretKey = [...Array(32)].map(() => Math.floor(Math.random() * 9)); + + await client.writeBytesToVault(data, wallet, secretKey); + + const vault = await client.fetchAndDecryptVault(secretKey); + console.log("vault: ", vault); + } + + document.getElementById("btn-run").addEventListener("click", run, false); + + + + + - - - - + \ No newline at end of file diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 2ff60c2ac2..749baf35e5 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -149,6 +149,12 @@ mod vault { } } +#[wasm_bindgen(js_name = genSecretKey)] +pub fn gen_secret_key() -> Vec { + let secret_key = 
bls::SecretKey::random(); + secret_key.to_bytes().to_vec() +} + #[wasm_bindgen(js_name = Wallet)] pub struct JsWallet(evmlib::wallet::Wallet); diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js index 8720eaac54..0e5edacf55 100644 --- a/autonomi/tests-js/index.js +++ b/autonomi/tests-js/index.js @@ -54,7 +54,7 @@ describe('autonomi', function () { it('writes bytes to vault and fetches it', async () => { const data = randomData(32); - const secretKey = randomData(32); + const secretKey = atnm.genSecretKey(); await client.writeBytesToVault(data, wallet, secretKey); const dataFetched = await client.fetchAndDecryptVault(secretKey); From 7ff98aacf79d257e99a22b33dd891bfc4b16084c Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Thu, 17 Oct 2024 14:13:32 +0200 Subject: [PATCH 023/128] docs(autonomi): add wasm README --- autonomi/README.md | 40 +++++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/autonomi/README.md b/autonomi/README.md index 3b27c6b0f0..babfa51eed 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -35,9 +35,9 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean - 4. Then run the tests with the `local` feature and pass the EVM params again: ```sh -$ EVM_NETWORK=local cargo test --package=autonomi --features=local +EVM_NETWORK=local cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture +RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture ``` ### Using a live testnet or mainnet @@ -55,9 +55,9 @@ cargo run --bin=safenode-manager --features=local -- local run --build --clean - payment tokens on the network (in this case Arbitrum One): ```sh -$ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local +EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture +RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture ``` ### WebAssembly @@ -70,10 +70,40 @@ To run a WASM test - Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only. Example: -````sh +```sh SAFE_PEERS=/ip4//tcp//ws/p2p/ wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put ``` +#### Test from JS in the browser + +`wasm-pack test` does not execute JavaScript, but runs mostly WebAssembly. Again make sure the environment variables are set and build the JS package: + +```sh +wasm-pack build --dev --target=web autonomi --features=vault +``` + +Then cd into `autonomi/tests-js`, and use `npm` to install and serve the test html file. +``` +cd autonomi/tests-js +npm install +npm run serve +``` + +Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press 'run'. + + +#### `index.html` + +There is also a simple `index.html` file that runs some JavaScript. + +Build the package (again with the env variables) and run a webserver, e.g. with Python: +```sh +wasm-pack build --dev --target=web autonomi +python -m http.server --directory=autonomi 8000 +``` + +Then visit `http://127.0.0.1:8000/` in your (modern) browser. 
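For orientation, the browser flow that `index.html` and the mocha tests drive is small enough to sketch in a few lines. This is a hedged example, not part of the patch: the multiaddr is a placeholder you must fill in, and it assumes the same `pkg` output directory and bindings (`init`, `logInit`, `Client.connect`, `getFundedWallet`, `dataPut`, `dataGet`) exercised by `tests-js/index.js` above.

```js
import init, * as atnm from './pkg/autonomi.js';

// The WASM module must be initialised before any other binding is called.
await init();
atnm.logInit("sn_networking=warn,autonomi=trace");

// Connect via a websocket multiaddr of a local node (placeholder below),
// then pay for and upload 32 random bytes, and read them back.
const client = await atnm.Client.connect(["/ip4/127.0.0.1/tcp/<PORT>/ws/p2p/<PEER_ID>"]);
const wallet = atnm.getFundedWallet();
const data = crypto.getRandomValues(new Uint8Array(32));
const addr = await client.dataPut(data, wallet);
const fetched = await client.dataGet(addr);
console.log("stored at", addr, "roundtrip ok:", fetched.length === data.length);
```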
+
 ## Faucet (local)

From 5f5cdb7b02b7ab306e94c7c44f853bbeaf690d54 Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Thu, 17 Oct 2024 16:04:51 +0200
Subject: [PATCH 024/128] fix(networking): use platform Instant::now

---
 sn_networking/src/event/kad.rs | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs
index 88a2a7ffca..77bf622fcb 100644
--- a/sn_networking/src/event/kad.rs
+++ b/sn_networking/src/event/kad.rs
@@ -7,8 +7,8 @@
 // permissions and limitations relating to use of the SAFE Network Software.

 use crate::{
-    cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, GetRecordCfg,
-    GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE,
+    cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, target_arch::Instant,
+    GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE,
 };
 use itertools::Itertools;
 use libp2p::{
@@ -23,10 +23,7 @@ use sn_protocol::{
     storage::get_type_from_record,
     NetworkAddress, PrettyPrintRecordKey,
 };
-use std::{
-    collections::{hash_map::Entry, HashSet},
-    time::Instant,
-};
+use std::collections::{hash_map::Entry, HashSet};
 use tokio::sync::oneshot;
 use xor_name::XorName;

From 97bfbe304d0d0023b0fe54dc17aa3879a7235d27 Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Thu, 17 Oct 2024 12:56:37 +0900
Subject: [PATCH 025/128] chore(networking): rename get_closest_peers ->
 get_close_group_closest_peers

---
 sn_networking/src/lib.rs | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index c9244dbc46..c24de2f790 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -399,7 +399,7 @@ impl Network {
     /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
     /// Excludes the client's `PeerId` while calculating the closest peers.
     pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result<Vec<PeerId>> {
-        self.get_closest_peers(key, true).await
+        self.get_close_group_closest_peers(key, true).await
     }

     /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
@@ -449,7 +449,9 @@
         // Do not query the closest_peers during every re-try attempt.
         // The close_nodes don't change often and the previous set of close_nodes might be taking a while to write
         // the Chunk, so query them again incase of a failure.
-        close_nodes = self.get_closest_peers(&chunk_address, true).await?;
+        close_nodes = self
+            .get_close_group_closest_peers(&chunk_address, true)
+            .await?;
     }
     retry_attempts += 1;
     info!(
@@ -514,7 +516,9 @@
     ) -> Result {
         // The requirement of having at least CLOSE_GROUP_SIZE
         // close nodes will be checked internally automatically.
-        let mut close_nodes = self.get_closest_peers(&record_address, true).await?;
+        let mut close_nodes = self
+            .get_close_group_closest_peers(&record_address, true)
+            .await?;

         // Filter out results from the ignored peers.
         close_nodes.retain(|peer_id| !ignore_peers.contains(peer_id));
@@ -597,7 +601,9 @@
     let record_address = NetworkAddress::from_record_key(&key);
     // The requirement of having at least CLOSE_GROUP_SIZE
     // close nodes will be checked internally automatically.
-    let close_nodes = self.get_closest_peers(&record_address, true).await?;
+    let close_nodes = self
+        .get_close_group_closest_peers(&record_address, true)
+        .await?;

     let self_address = NetworkAddress::from_peer(self.peer_id());

     let request = Request::Query(Query::GetRegisterRecord {
@@ -1069,7 +1075,7 @@ impl Network {
     /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
     /// If `client` is false, then include `self` among the `closest_peers`
-    pub async fn get_closest_peers(
+    pub async fn get_close_group_closest_peers(
         &self,
         key: &NetworkAddress,
         client: bool,

From 44f1f8755525553d91bbc3414f4c028222ff5989 Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Thu, 17 Oct 2024 13:08:35 +0900
Subject: [PATCH 026/128] feat(networking): use range based gets for
 close_peers calls

This should allow us to return more peers for various network operations,
and leverage range based gets more frequently

---
 sn_networking/src/cmd.rs |  8 ++--
 sn_networking/src/lib.rs | 85 +++++++++++++++++++++++++++++++++++++++---
 sn_node/src/node.rs      |  2 +-
 3 files changed, 84 insertions(+), 11 deletions(-)

diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index 5ec9ebd827..e2d92edc22 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -62,7 +62,7 @@ pub enum LocalSwarmCmd {
         sender: oneshot::Sender>,
     },
     /// Return the current GetRange as determined by the SwarmDriver
-    GetCurrentRange {
+    GetCurrentRequestRange {
         sender: oneshot::Sender<KBucketDistance>,
     },
     /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
@@ -247,7 +247,7 @@ impl Debug for LocalSwarmCmd {
             LocalSwarmCmd::GetKBuckets { .. } => {
                 write!(f, "LocalSwarmCmd::GetKBuckets")
             }
-            LocalSwarmCmd::GetCurrentRange { .. } => {
+            LocalSwarmCmd::GetCurrentRequestRange { .. } => {
                 write!(f, "SwarmCmd::GetCurrentRange")
             }
             LocalSwarmCmd::GetAllLocalPeersExcludingSelf { .. } => {
@@ -744,8 +744,8 @@ impl SwarmDriver {
                     .record_addresses();
                 let _ = sender.send(addresses);
             }
-            LocalSwarmCmd::GetCurrentRange { sender } => {
-                cmd_string = "GetCurrentRange";
+            LocalSwarmCmd::GetCurrentRequestRange { sender } => {
+                cmd_string = "GetCurrentRequestRange";
                 let _ = sender.send(self.get_request_range());
             }
             LocalSwarmCmd::GetKBuckets { sender } => {

diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index c24de2f790..1ab832f8a6 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -260,7 +260,7 @@ impl Network {
     /// Return the GetRange as determined by the internal SwarmDriver
     pub async fn get_range(&self) -> Result<KBucketDistance> {
         let (sender, receiver) = oneshot::channel();
-        self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRange { sender });
+        self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRequestRange { sender });
         receiver.await.map_err(NetworkError::from)
     }

@@ -398,8 +398,12 @@ impl Network {
     /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
     /// Excludes the client's `PeerId` while calculating the closest peers.
- pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result<Vec<PeerId>> { - self.get_close_group_closest_peers(key, true).await + pub async fn client_get_all_close_peers_in_range_or_close_group( + &self, + key: &NetworkAddress, + ) -> Result<Vec<PeerId>> { + self.get_all_close_peers_in_range_or_close_group(key, true) + .await } /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that @@ -450,7 +454,7 @@ impl Network { // The close_nodes don't change often and the previous set of close_nodes might be taking a while to write // the Chunk, so query them again in case of a failure. close_nodes = self - .get_close_group_closest_peers(&chunk_address, true) + .client_get_all_close_peers_in_range_or_close_group(&chunk_address) .await?; } retry_attempts += 1; @@ -517,7 +521,7 @@ impl Network { // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. let mut close_nodes = self - .get_close_group_closest_peers(&record_address, true) + .client_get_all_close_peers_in_range_or_close_group(&record_address) .await?; // Filter out results from the ignored peers. close_nodes.retain(|peer_id| !ignore_peers.contains(peer_id)); @@ -602,7 +606,7 @@ impl Network { // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. let close_nodes = self - .get_close_group_closest_peers(&record_address, true) + .client_get_all_close_peers_in_range_or_close_group(&record_address) .await?; let self_address = NetworkAddress::from_peer(self.peer_id()); @@ -1117,6 +1121,75 @@ impl Network { Ok(closest_peers.into_iter().cloned().collect()) } + /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
+ /// If `client` is false, then include `self` among the `closest_peers` + /// Returns all peers found inside the range + /// + /// If fewer than CLOSE_GROUP_SIZE peers are found, it will return all the peers found up to the CLOSE_GROUP_SIZE + pub async fn get_all_close_peers_in_range_or_close_group( + &self, + key: &NetworkAddress, + client: bool, + ) -> Result<Vec<PeerId>> { + let pretty_key = PrettyPrintKBucketKey(key.as_kbucket_key()); + debug!("Getting all the closest peers in range of {pretty_key:?}"); + let (sender, receiver) = oneshot::channel(); + self.send_network_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { + key: key.clone(), + sender, + }); + + let found_peers = receiver.await?; + + // Count self in if among the CLOSE_GROUP_SIZE closest and sort the result + let result_len = found_peers.len(); + let mut closest_peers = found_peers; + + let expected_range = self.get_range().await?; + + // ensure we're not including self here + if client { + // remove our peer id from the calculations here: + closest_peers.retain(|&x| x != self.peer_id()); + if result_len != closest_peers.len() { + info!("Remove self client from the closest_peers"); + } + } + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let close_peers_pretty_print: Vec<_> = closest_peers + .iter() + .map(|peer_id| { + format!( + "{peer_id:?}({:?})", + PrettyPrintKBucketKey(NetworkAddress::from_peer(*peer_id).as_kbucket_key()) + ) + }) + .collect(); + + debug!( + "Network knowledge of closest peers to {pretty_key:?} are: {close_peers_pretty_print:?}" + ); + } + + let mut restricted_closest_peers = + sort_peers_by_address_and_limit_by_distance(&closest_peers, key, expected_range)?; + + if restricted_closest_peers.len() < CLOSE_GROUP_SIZE { + warn!( + "Getting close peers to {pretty_key:?}: current GetRange of {:?} too strict, giving insufficient peers... Falling back to all peers found" , expected_range.ilog2()); + + restricted_closest_peers = + sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?; + } + + debug!( + "Network knowledge of closest peers in range of {:?} to target {pretty_key:?} are: {:?}", expected_range.ilog2(), restricted_closest_peers.len() + ); + Ok(restricted_closest_peers.into_iter().cloned().collect()) + } + /// Send a `Request` to the provided set of peers and wait for their responses concurrently. /// If `get_all_responses` is true, we wait for the responses from all the peers. /// NB TODO: Will return an error if the request times out.
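A rough usage sketch for the new API above (illustrative only; the helper below and its setup are assumed, not part of this patch — only `client_get_all_close_peers_in_range_or_close_group` comes from the code being added):

```rust
use sn_networking::{Network, Result};
use sn_protocol::NetworkAddress;

// Sketch: gather every peer within the current GetRange around `target`
// (falling back to the CLOSE_GROUP_SIZE closest peers when the range is
// too strict) and hand each one some work.
async fn contact_close_peers(network: &Network, target: NetworkAddress) -> Result<()> {
    let close_peers = network
        .client_get_all_close_peers_in_range_or_close_group(&target)
        .await?;
    for peer_id in close_peers {
        // ... send the request / replicate the record to `peer_id` here ...
        let _ = peer_id;
    }
    Ok(())
}
```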
diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 4bb21c720c..0d74551751 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -528,7 +528,7 @@ impl Node { async fn close_nodes_shunning_peer(network: &Network, peer_id: PeerId) -> bool { // using `client` to exclude self let closest_peers = match network - .client_get_closest_peers(&NetworkAddress::from_peer(peer_id)) + .client_get_all_close_peers_in_range_or_close_group(&NetworkAddress::from_peer(peer_id)) .await { Ok(peers) => peers, From b6bbb24259b3b6e2cb69d62b9e9c9aa9c442af17 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 07:56:02 +0200 Subject: [PATCH 027/128] refactor(global): remove patch from Cargo.toml --- Cargo.lock | 74 ++++++++++++++++++++++++++++-------------------------- Cargo.toml | 3 --- 2 files changed, 38 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73fae82220..5d397e2a98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,8 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -151,7 +152,8 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +168,8 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -237,7 +240,8 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -254,7 +258,8 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" dependencies = [ "alloy-primitives", "alloy-serde", @@ -276,7 +281,8 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -289,7 +295,8 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.4.2" -source = 
"git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,7 +316,8 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -321,7 +329,8 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -364,7 +373,8 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" dependencies = [ "alloy-chains", "alloy-consensus", @@ -377,7 +387,6 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", - "alloy-signer", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -388,17 +397,14 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", - "parking_lot", "pin-project", "reqwest 0.12.7", - "schnellru", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", - "wasmtimer", ] [[package]] @@ -426,7 +432,8 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -442,13 +449,13 @@ dependencies = [ "tower 0.5.1", "tracing", "url", - "wasmtimer", ] [[package]] name = "alloy-rpc-types" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -460,7 +467,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" dependencies = [ "alloy-primitives", "alloy-serde", @@ -470,7 +478,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -488,7 +497,8 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" dependencies = [ "alloy-primitives", "serde", @@ -498,7 +508,8 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" dependencies = [ "alloy-primitives", "async-trait", @@ -511,7 +522,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" dependencies = [ "alloy-consensus", "alloy-network", @@ -599,7 +611,8 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -613,13 +626,13 @@ dependencies = [ "tracing", "url", "wasm-bindgen-futures", - "wasmtimer", ] [[package]] name = "alloy-transport-http" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#e201df849552ee8e3279723de18add7ccf21e1ab" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7553,17 +7566,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schnellru" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" -dependencies = [ - "ahash", - "cfg-if", - "hashbrown 0.13.2", -] - [[package]] name = "scoped-tls" version = "1.0.1" diff --git a/Cargo.toml b/Cargo.toml index 0422d748c8..779485a2c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,3 @@ pre-release-commit-message = "chore(release): release commit, tags, deps and cha publish = false push = false tag = false - -[patch.crates-io] -alloy = { git = 'https://github.com/alloy-rs/alloy.git', branch = "main" } From 8603d3fee8dd0cc38bddc6c88a33fdc9a8c7823e Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 18 Oct 2024 09:26:27 +0200 Subject: [PATCH 028/128] test(wasm): improve wasm test index.html --- autonomi/index.html | 71 ++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 33 deletions(-) diff --git a/autonomi/index.html b/autonomi/index.html index 6f4d8182ad..84f7480543 100644 --- a/autonomi/index.html +++ b/autonomi/index.html @@ -1,13 +1,13 @@ - + - - - - + document.getElementById("btn-run").addEventListener("click", run, false); + - - - - + + + + + \ No newline at end of file From 9a7ce8ce1b9e9bd9eeed04335beead76c8bcc14f Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 18 Oct 2024 16:00:13 +0900 Subject: [PATCH 029/128] 
feat: user data stored in vault --- autonomi/Cargo.toml | 4 +- autonomi/src/client/archive.rs | 23 ++--- autonomi/src/client/data.rs | 4 +- autonomi/src/client/mod.rs | 2 + autonomi/src/client/vault.rs | 38 ++++++-- autonomi/src/client/vault_user_data.rs | 118 +++++++++++++++++++++++++ autonomi/src/client/wasm.rs | 24 ++--- autonomi/tests/fs.rs | 21 +++-- sn_networking/src/record_store.rs | 5 +- sn_protocol/src/error.rs | 3 + sn_protocol/src/lib.rs | 5 +- sn_protocol/src/storage/scratchpad.rs | 26 ++++-- 12 files changed, 215 insertions(+), 58 deletions(-) create mode 100644 autonomi/src/client/vault_user_data.rs diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index c7ecf07338..12dbf13cf9 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -13,10 +13,10 @@ repository = "https://github.com/maidsafe/safe_network" crate-type = ["cdylib", "rlib"] [features] -default = ["data"] +default = ["data", "vault"] full = ["data", "registers", "vault"] data = [] -vault = ["data"] +vault = ["data", "registers"] fs = ["tokio/fs", "data"] local = ["sn_networking/local", "test_utils/local", "sn_evm/local"] registers = ["data"] diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs index 2e4b1b7e4a..17055e0682 100644 --- a/autonomi/src/client/archive.rs +++ b/autonomi/src/client/archive.rs @@ -14,13 +14,12 @@ use std::{ use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH}; use super::{ - data::DataAddr, - data::{GetError, PutError}, + data::{CostError, DataAddr, GetError, PutError}, Client, }; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use sn_evm::EvmWallet; +use sn_evm::{AttoTokens, EvmWallet}; use xor_name::XorName; /// The address of an archive on the network. Points to an [`Archive`]. @@ -36,13 +35,13 @@ pub enum RenameError { /// An archive of files, containing file paths, their metadata and the files' data addresses /// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
-#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct Archive { map: HashMap, } /// Metadata for a file in an archive -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { pub created: u64, pub modified: u64, @@ -147,12 +146,6 @@ impl Archive { } } -impl Default for Archive { - fn default() -> Self { - Self::new() - } -} - impl Client { /// Fetch an archive from the network pub async fn archive_get(&self, addr: ArchiveAddr) -> Result { @@ -171,4 +164,12 @@ impl Client { .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?; self.data_put(bytes, wallet).await } + + /// Get the cost to upload an archive + pub async fn archive_cost(&self, archive: Archive) -> Result { + let bytes = archive + .into_bytes() + .map_err(|e| CostError::Serialization(format!("Failed to serialize archive: {e:?}")))?; + self.data_cost(bytes).await + } } diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 366ad643be..d417978b81 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -39,7 +39,7 @@ pub enum PutError { Network(#[from] NetworkError), #[error("Error occurred during payment.")] PayError(#[from] PayError), - #[error("Failed to serialize {0}")] + #[error("Serialization error: {0}")] Serialization(String), #[error("A wallet error occurred.")] Wallet(#[from] sn_evm::EvmError), @@ -82,6 +82,8 @@ pub enum CostError { CouldNotGetStoreQuote(XorName), #[error("Could not get store costs: {0:?}")] CouldNotGetStoreCosts(NetworkError), + #[error("Failed to serialize {0}")] + Serialization(String), } impl Client { diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index df5dab4ec0..2205d51cd5 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -18,6 +18,8 @@ pub mod fs; pub mod registers; #[cfg(feature = "vault")] pub mod vault; +#[cfg(feature = "vault")] +pub mod vault_user_data; #[cfg(target_arch = "wasm32")] pub mod wasm; diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 02eda1f4a6..bbcfd18bbd 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -7,17 +7,18 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use std::collections::HashSet; +use std::hash::{DefaultHasher, Hash, Hasher}; use crate::client::data::PutError; use crate::client::Client; use bls::SecretKey; -use bytes::Bytes; use libp2p::kad::{Quorum, Record}; use sn_evm::EvmWallet; use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; use sn_protocol::storage::{ try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress, }; +use sn_protocol::Bytes; use sn_protocol::{storage::try_deserialize_record, NetworkAddress}; use tracing::info; @@ -33,16 +34,32 @@ pub enum VaultError { Network(#[from] NetworkError), } +/// The version of the vault content +/// The version is used to determine the type of the contents of the bytes contained in a vault +/// Custom apps can use this to store their own custom types of data in vaults +/// The value 0 is reserved for tests +pub type VaultContentVersion = u64; + +/// For custom apps using Scratchpad, this function converts an app identifier or name to a VaultContentVersion +pub fn app_name_to_version(s: T) -> VaultContentVersion { + let mut hasher = DefaultHasher::new(); + s.hash(&mut hasher); + hasher.finish() +} + impl Client { /// Retrieves and returns a decrypted vault if one exists. + /// Returns the version of the vault content + /// The version is used to determine the type of the contents of the bytes pub async fn fetch_and_decrypt_vault( &self, secret_key: &SecretKey, - ) -> Result, VaultError> { + ) -> Result<(Bytes, VaultContentVersion), VaultError> { info!("Fetching and decrypting vault"); let pad = self.get_vault_from_network(secret_key).await?; - Ok(pad.decrypt_data(secret_key)?) + let data = pad.decrypt_data(secret_key)?; + Ok((data, pad.version())) } /// Gets the vault Scratchpad from a provided client public key @@ -81,14 +98,17 @@ impl Client { /// Put data into the client's VaultPacket /// - /// Pays for a new VaultPacket if none yet created for the client. Returns the current version - /// of the data on success. + /// Pays for a new VaultPacket if none yet created for the client. + /// Provide the bytes to be written to the vault and the version of the vault content. + /// The Version of the vault content is used to determine the type of the contents of the bytes. + /// It is recommended to use the hash of the app name or unique identifier as the version. pub async fn write_bytes_to_vault( &self, data: Bytes, wallet: &EvmWallet, secret_key: &SecretKey, - ) -> Result { + version: VaultContentVersion, + ) -> Result<(), PutError> { let client_pk = secret_key.public_key(); let pad_res = self.get_vault_from_network(secret_key).await; @@ -106,10 +126,10 @@ impl Client { existing_data } else { trace!("new scratchpad creation"); - Scratchpad::new(client_pk) + Scratchpad::new(client_pk, version) }; - let next_count = scratch.update_and_sign(data, secret_key); + let _next_count = scratch.update_and_sign(data, secret_key); let scratch_address = scratch.network_address(); let scratch_key = scratch_address.to_record_key(); @@ -181,6 +201,6 @@ impl Client { ) })?; - Ok(next_count) + Ok(()) } } diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs new file mode 100644 index 0000000000..2dbe941430 --- /dev/null +++ b/autonomi/src/client/vault_user_data.rs @@ -0,0 +1,118 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::collections::HashMap; +use std::collections::HashSet; + +use super::archive::ArchiveAddr; +use super::data::GetError; +use super::data::PutError; +use super::registers::RegisterAddress; +use super::vault::VaultError; +use super::Client; +use crate::client::vault::{app_name_to_version, VaultContentVersion}; +use bls::SecretKey; +use serde::{Deserialize, Serialize}; +use sn_evm::EvmWallet; +use sn_protocol::Bytes; + +use std::sync::LazyLock; + +/// Vault content version for UserDataVault +pub static USER_DATA_VAULT_CONTENT_VERSION: LazyLock<VaultContentVersion> = + LazyLock::new(|| app_name_to_version("UserData")); + +/// UserData is stored in Vaults and contains most of a user's private data: +/// It allows users to keep track of only the key to their User Data Vault +/// while having the rest kept on the Network encrypted in a Vault for them +/// Using the User Data Vault is optional; one can decide to keep all their data locally instead. +#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] +pub struct UserData { + /// The register secret key hex encoded + pub register_sk: Option<String>, + /// Owned register addresses + pub registers: HashSet<RegisterAddress>, + /// Owned file archive addresses + pub file_archives: HashSet<ArchiveAddr>, + + /// Owned register names; providing them is optional + pub register_names: HashMap<String, RegisterAddress>, + /// Owned file archive addresses, along with an optional name for each archive + pub file_archive_names: HashMap<String, ArchiveAddr>, +} + +/// Errors that can occur during the get operation.
+#[derive(Debug, thiserror::Error)] +pub enum UserDataVaultGetError { + #[error("Vault error: {0}")] + Vault(#[from] VaultError), + #[error("Unsupported vault content version: {0}")] + UnsupportedVaultContentVersion(VaultContentVersion), + #[error("Serialization error: {0}")] + Serialization(String), + #[error("Get error: {0}")] + GetError(#[from] GetError), +} + +impl UserData { + /// Create a new empty UserData + pub fn new() -> Self { + Self::default() + } + + /// To bytes + pub fn to_bytes(&self) -> Result { + let bytes = rmp_serde::to_vec(&self)?; + Ok(Bytes::from(bytes)) + } + + /// From bytes + pub fn from_bytes(bytes: Bytes) -> Result { + let vault_content = rmp_serde::from_slice(&bytes)?; + Ok(vault_content) + } +} + +impl Client { + /// Get the user data from the vault + pub async fn get_user_data_from_vault( + &self, + secret_key: &SecretKey, + ) -> Result { + let (bytes, version) = self.fetch_and_decrypt_vault(secret_key).await?; + + if version != *USER_DATA_VAULT_CONTENT_VERSION { + return Err(UserDataVaultGetError::UnsupportedVaultContentVersion( + version, + )); + } + + let vault = UserData::from_bytes(bytes).map_err(|e| { + UserDataVaultGetError::Serialization(format!( + "Failed to deserialize vault content: {e}" + )) + })?; + + Ok(vault) + } + + /// Put the user data to the vault + pub async fn put_user_data_to_vault( + &self, + secret_key: &SecretKey, + wallet: &EvmWallet, + user_data: UserData, + ) -> Result<(), PutError> { + let bytes = user_data + .to_bytes() + .map_err(|e| PutError::Serialization(format!("Failed to serialize user data: {e}")))?; + self.write_bytes_to_vault(bytes, wallet, secret_key, *USER_DATA_VAULT_CONTENT_VERSION) + .await?; + Ok(()) + } +} diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 34400356f2..3bc2504636 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -2,6 +2,10 @@ use libp2p::Multiaddr; use wasm_bindgen::prelude::*; use super::address::{addr_to_str, str_to_addr}; +use super::vault_user_data::UserData; + +#[wasm_bindgen(js_name = UserData)] +pub struct JsUserData(UserData); #[wasm_bindgen(js_name = Client)] pub struct JsClient(super::Client); @@ -115,33 +119,31 @@ mod vault { #[wasm_bindgen(js_class = Client)] impl JsClient { - #[wasm_bindgen(js_name = fetchAndDecryptVault)] - pub async fn fetch_and_decrypt_vault( + #[wasm_bindgen(js_name = getUserDataFromVault)] + pub async fn get_user_data_from_vault( &self, secret_key: Vec, - ) -> Result>, JsError> { + ) -> Result { let secret_key: [u8; 32] = secret_key[..].try_into()?; let secret_key = SecretKey::from_bytes(secret_key)?; - let vault = self.0.fetch_and_decrypt_vault(&secret_key).await?; - let vault = vault.map(|v| v.to_vec()); + let user_data = self.0.get_user_data_from_vault(&secret_key).await?; - Ok(vault) + Ok(JsUserData(user_data)) } - #[wasm_bindgen(js_name = writeBytesToVault)] - pub async fn write_bytes_to_vault( + #[wasm_bindgen(js_name = putUserDataToVault)] + pub async fn put_user_data_to_vault( &self, - vault: Vec, + user_data: JsUserData, wallet: &mut JsWallet, secret_key: Vec, ) -> Result<(), JsError> { let secret_key: [u8; 32] = secret_key[..].try_into()?; let secret_key = SecretKey::from_bytes(secret_key)?; - let vault = bytes::Bytes::from(vault); self.0 - .write_bytes_to_vault(vault, &mut wallet.0, &secret_key) + .put_user_data_to_vault(&secret_key, &wallet.0, user_data.0) .await?; Ok(()) diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs index 9c53fd26b8..f59b7bf680 100644 --- a/autonomi/tests/fs.rs +++ 
b/autonomi/tests/fs.rs @@ -91,24 +91,23 @@ async fn file_into_vault() -> Result<()> { sleep(Duration::from_secs(2)).await; let archive = client.archive_get(addr).await?; + let set_version = 0; client - .write_bytes_to_vault(archive.into_bytes()?, &wallet, &client_sk) + .write_bytes_to_vault(archive.into_bytes()?, &wallet, &client_sk, set_version) .await?; // now assert over the stored account packet let new_client = Client::connect(&[]).await?; - if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? { - let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; + let (ap, got_version) = new_client.fetch_and_decrypt_vault(&client_sk).await?; + assert_eq!(set_version, got_version); + let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; - assert_eq!( - archive.iter().count(), - ap_archive_fetched.iter().count(), - "archive fetched should match archive put" - ); - } else { - eyre::bail!("No account packet found"); - } + assert_eq!( + archive.iter().count(), + ap_archive_fetched.iter().count(), + "archive fetched should match archive put" + ); Ok(()) } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 35b1cdec59..4ac9170e85 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -1238,7 +1238,7 @@ mod tests { let owner_sk = SecretKey::random(); let owner_pk = owner_sk.public_key(); - let mut scratchpad = Scratchpad::new(owner_pk); + let mut scratchpad = Scratchpad::new(owner_pk, 0); let _next_version = scratchpad.update_and_sign(unencrypted_scratchpad_data.clone(), &owner_sk); @@ -1283,8 +1283,7 @@ mod tests { let decrypted_data = scratchpad.decrypt_data(&owner_sk)?; assert_eq!( - decrypted_data, - Some(unencrypted_scratchpad_data), + decrypted_data, unencrypted_scratchpad_data, "Stored scratchpad data should match original" ); } diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs index 8462ff85f3..2d24feb0d9 100644 --- a/sn_protocol/src/error.rs +++ b/sn_protocol/src/error.rs @@ -51,6 +51,9 @@ pub enum Error { /// The provided SecretKey failed to decrypt the data #[error("Failed to derive CipherText from encrypted_data")] ScratchpadCipherTextFailed, + /// The provided cipher text is invalid + #[error("Provided cipher text is invalid")] + ScratchpadCipherTextInvalid, // ---------- payment errors #[error("There was an error getting the storecost from kademlia store")] diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs index 4d3b92628d..f397173ca1 100644 --- a/sn_protocol/src/lib.rs +++ b/sn_protocol/src/lib.rs @@ -32,7 +32,10 @@ pub use error::Error; use storage::ScratchpadAddress; use self::storage::{ChunkAddress, RegisterAddress, SpendAddress}; -use bytes::Bytes; + +/// Re-export of Bytes used throughout the protocol +pub use bytes::Bytes; + use libp2p::{ kad::{KBucketDistance as Distance, KBucketKey as Key, RecordKey}, multiaddr::Protocol, diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index ea38d2e686..73c4aad3c1 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -8,9 +8,9 @@ use super::ScratchpadAddress; use crate::error::{Error, Result}; +use crate::Bytes; use crate::NetworkAddress; use bls::{Ciphertext, PublicKey, SecretKey, Signature}; -use bytes::Bytes; use serde::{Deserialize, Serialize}; use xor_name::XorName; @@ -23,6 +23,8 @@ pub struct Scratchpad { /// Network address.
Omitted when serialising and /// calculated from the `encrypted_data` when deserialising. address: ScratchpadAddress, + /// Data version + version: u64, /// Contained data. This should be encrypted #[debug(skip)] encrypted_data: Bytes, @@ -35,10 +37,11 @@ pub struct Scratchpad { impl Scratchpad { /// Creates a new instance of `Scratchpad`. - pub fn new(owner: PublicKey) -> Self { + pub fn new(owner: PublicKey, version: u64) -> Self { Self { address: ScratchpadAddress::new(owner), encrypted_data: Bytes::new(), + version, counter: 0, signature: None, } @@ -49,6 +52,11 @@ impl Scratchpad { self.counter } + /// Return the current version + pub fn version(&self) -> u64 { + self.version + } + /// Increments the counter value. pub fn increment(&mut self) -> u64 { self.counter += 1; @@ -94,13 +102,13 @@ impl Scratchpad { } /// Returns the encrypted_data, decrypted via the passed SecretKey - pub fn decrypt_data(&self, sk: &SecretKey) -> Result> { - Ok(sk - .decrypt( - &Ciphertext::from_bytes(&self.encrypted_data) - .map_err(|_| Error::ScratchpadCipherTextFailed)?, - ) - .map(Bytes::from)) + pub fn decrypt_data(&self, sk: &SecretKey) -> Result { + let cipher = Ciphertext::from_bytes(&self.encrypted_data) + .map_err(|_| Error::ScratchpadCipherTextFailed)?; + let bytes = sk + .decrypt(&cipher) + .ok_or(Error::ScratchpadCipherTextInvalid)?; + Ok(Bytes::from(bytes)) } /// Returns the encrypted_data hash From 3c1dd9e16d4db4d689058fce9e1e1296bd6bfa92 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 18 Oct 2024 16:03:08 +0900 Subject: [PATCH 030/128] chore: fix test --- autonomi/tests/fs.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs index f59b7bf680..b952852bc2 100644 --- a/autonomi/tests/fs.rs +++ b/autonomi/tests/fs.rs @@ -104,8 +104,7 @@ async fn file_into_vault() -> Result<()> { let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; assert_eq!( - archive.iter().count(), - ap_archive_fetched.iter().count(), + archive, ap_archive_fetched, "archive fetched should match archive put" ); From 8bd5007c40f885e26fe22b535281532180632b83 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 18 Oct 2024 16:22:53 +0900 Subject: [PATCH 031/128] chore: improve naming to data encoding --- autonomi/src/client/vault.rs | 17 ++++++++-------- autonomi/src/client/vault_user_data.rs | 27 ++++++++++++++------------ sn_protocol/src/storage/scratchpad.rs | 14 ++++++------- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index bbcfd18bbd..9417a8acf9 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -34,14 +34,15 @@ pub enum VaultError { Network(#[from] NetworkError), } -/// The version of the vault content -/// The version is used to determine the type of the contents of the bytes contained in a vault +/// The content type of the vault data +/// The number is used to determine the type of the contents of the bytes contained in a vault /// Custom apps can use this to store their own custom types of data in vaults +/// It is recommended to use the hash of the app name or unique identifier as the version /// The value 0 is reserved for tests -pub type VaultContentVersion = u64; +pub type VaultContentType = u64; -/// For custom apps using Scratchpad, this function converts an app identifier or name to a VaultContentVersion -pub fn app_name_to_version(s: T) -> VaultContentVersion { +/// For custom apps using Scratchpad, this function 
converts an app identifier or name to a VaultContentType +pub fn app_name_to_vault_content_type(s: T) -> VaultContentType { let mut hasher = DefaultHasher::new(); s.hash(&mut hasher); hasher.finish() @@ -54,12 +55,12 @@ impl Client { pub async fn fetch_and_decrypt_vault( &self, secret_key: &SecretKey, - ) -> Result<(Bytes, VaultContentVersion), VaultError> { + ) -> Result<(Bytes, VaultContentType), VaultError> { info!("Fetching and decrypting vault"); let pad = self.get_vault_from_network(secret_key).await?; let data = pad.decrypt_data(secret_key)?; - Ok((data, pad.version())) + Ok((data, pad.data_encoding())) } /// Gets the vault Scratchpad from a provided client public key @@ -107,7 +108,7 @@ impl Client { data: Bytes, wallet: &EvmWallet, secret_key: &SecretKey, - version: VaultContentVersion, + version: VaultContentType, ) -> Result<(), PutError> { let client_pk = secret_key.public_key(); diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs index 2dbe941430..b0d88854cc 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault_user_data.rs @@ -15,7 +15,7 @@ use super::data::PutError; use super::registers::RegisterAddress; use super::vault::VaultError; use super::Client; -use crate::client::vault::{app_name_to_version, VaultContentVersion}; +use crate::client::vault::{app_name_to_vault_content_type, VaultContentType}; use bls::SecretKey; use serde::{Deserialize, Serialize}; use sn_evm::EvmWallet; @@ -23,9 +23,9 @@ use sn_protocol::Bytes; use std::sync::LazyLock; -/// Vault content version for UserDataVault -pub static USER_DATA_VAULT_CONTENT_VERSION: LazyLock = - LazyLock::new(|| app_name_to_version("UserData")); +/// Vault content type for UserDataVault +pub static USER_DATA_VAULT_CONTENT_IDENTIFIER: LazyLock = + LazyLock::new(|| app_name_to_vault_content_type("UserData")); /// UserData is stored in Vaults and contains most of a user's private data: /// It allows users to keep track of only the key to their User Data Vault @@ -51,8 +51,8 @@ pub struct UserData { pub enum UserDataVaultGetError { #[error("Vault error: {0}")] Vault(#[from] VaultError), - #[error("Unsupported vault content version: {0}")] - UnsupportedVaultContentVersion(VaultContentVersion), + #[error("Unsupported vault content type: {0}")] + UnsupportedVaultContentType(VaultContentType), #[error("Serialization error: {0}")] Serialization(String), #[error("Get error: {0}")] @@ -86,10 +86,8 @@ impl Client { ) -> Result { let (bytes, version) = self.fetch_and_decrypt_vault(secret_key).await?; - if version != *USER_DATA_VAULT_CONTENT_VERSION { - return Err(UserDataVaultGetError::UnsupportedVaultContentVersion( - version, - )); + if version != *USER_DATA_VAULT_CONTENT_IDENTIFIER { + return Err(UserDataVaultGetError::UnsupportedVaultContentType(version)); } let vault = UserData::from_bytes(bytes).map_err(|e| { @@ -111,8 +109,13 @@ impl Client { let bytes = user_data .to_bytes() .map_err(|e| PutError::Serialization(format!("Failed to serialize user data: {e}")))?; - self.write_bytes_to_vault(bytes, wallet, secret_key, *USER_DATA_VAULT_CONTENT_VERSION) - .await?; + self.write_bytes_to_vault( + bytes, + wallet, + secret_key, + *USER_DATA_VAULT_CONTENT_IDENTIFIER, + ) + .await?; Ok(()) } } diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index 73c4aad3c1..5c99cbdcac 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -23,8 +23,8 @@ pub struct Scratchpad { /// Network address. 
Omitted when serialising and /// calculated from the `encrypted_data` when deserialising. address: ScratchpadAddress, - /// Data version - version: u64, + /// Data encoding: custom apps using scratchpad should use this so they can identify the type of data they are storing + data_encoding: u64, /// Contained data. This should be encrypted #[debug(skip)] encrypted_data: Bytes, @@ -37,11 +37,11 @@ pub struct Scratchpad { impl Scratchpad { /// Creates a new instance of `Scratchpad`. - pub fn new(owner: PublicKey, version: u64) -> Self { + pub fn new(owner: PublicKey, data_encoding: u64) -> Self { Self { address: ScratchpadAddress::new(owner), encrypted_data: Bytes::new(), - version, + data_encoding, counter: 0, signature: None, } @@ -52,9 +52,9 @@ impl Scratchpad { self.counter } - /// Return the current version - pub fn version(&self) -> u64 { - self.version + /// Return the current data encoding + pub fn data_encoding(&self) -> u64 { + self.data_encoding } /// Increments the counter value. From 88694f74ea397573ac43b33c8c5fc66864285b97 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 18 Oct 2024 16:28:40 +0900 Subject: [PATCH 032/128] chore: cleanup namings accordingly --- autonomi/src/client/vault.rs | 16 +++++++--------- autonomi/src/client/vault_user_data.rs | 8 +++++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 9417a8acf9..4004a3d530 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -37,11 +37,11 @@ pub enum VaultError { /// The content type of the vault data /// The number is used to determine the type of the contents of the bytes contained in a vault /// Custom apps can use this to store their own custom types of data in vaults -/// It is recommended to use the hash of the app name or unique identifier as the version +/// It is recommended to use the hash of the app name or a unique identifier as the content type using [`app_name_to_vault_content_type`] /// The value 0 is reserved for tests pub type VaultContentType = u64; -/// For custom apps using Scratchpad, this function converts an app identifier or name to a VaultContentType +/// For custom apps using Scratchpad, this function converts an app identifier or name to a [`VaultContentType`] pub fn app_name_to_vault_content_type(s: T) -> VaultContentType { let mut hasher = DefaultHasher::new(); s.hash(&mut hasher); hasher.finish() @@ -50,8 +50,7 @@ pub fn app_name_to_vault_content_type(s: T) -> VaultContentType { impl Client { /// Retrieves and returns a decrypted vault if one exists. - /// Returns the version of the vault content - /// The version is used to determine the type of the contents of the bytes + /// Returns the content type of the bytes in the vault pub async fn fetch_and_decrypt_vault( &self, secret_key: &SecretKey, @@ -99,15 +99,14 @@ impl Client { /// Put data into the client's VaultPacket /// /// Pays for a new VaultPacket if none yet created for the client. - /// Provide the bytes to be written to the vault and the version of the vault content. - /// The Version of the vault content is used to determine the type of the contents of the bytes. - /// It is recommended to use the hash of the app name or unique identifier as the version. + /// Provide the bytes to be written to the vault and the content type of those bytes. + /// It is recommended to use the hash of the app name or a unique identifier as the content type.
pub async fn write_bytes_to_vault( &self, data: Bytes, wallet: &EvmWallet, secret_key: &SecretKey, - version: VaultContentType, + content_type: VaultContentType, ) -> Result<(), PutError> { let client_pk = secret_key.public_key(); @@ -127,7 +125,7 @@ impl Client { existing_data } else { trace!("new scratchpad creation"); - Scratchpad::new(client_pk, version) + Scratchpad::new(client_pk, content_type) }; let _next_count = scratch.update_and_sign(data, secret_key); diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs index b0d88854cc..779cf023d9 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault_user_data.rs @@ -84,10 +84,12 @@ impl Client { &self, secret_key: &SecretKey, ) -> Result { - let (bytes, version) = self.fetch_and_decrypt_vault(secret_key).await?; + let (bytes, content_type) = self.fetch_and_decrypt_vault(secret_key).await?; - if version != *USER_DATA_VAULT_CONTENT_IDENTIFIER { - return Err(UserDataVaultGetError::UnsupportedVaultContentType(version)); + if content_type != *USER_DATA_VAULT_CONTENT_IDENTIFIER { + return Err(UserDataVaultGetError::UnsupportedVaultContentType( + content_type, + )); } let vault = UserData::from_bytes(bytes).map_err(|e| { From 37d9022c0feb107f7cef289a2b3da4fce1e16167 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 18 Oct 2024 11:08:51 +0200 Subject: [PATCH 033/128] refactor(evmlib): rename error variant and remove dead code allowances --- evmlib/src/external_signer.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/evmlib/src/external_signer.rs b/evmlib/src/external_signer.rs index a182b88cd5..83b43695d0 100644 --- a/evmlib/src/external_signer.rs +++ b/evmlib/src/external_signer.rs @@ -18,11 +18,10 @@ use std::collections::HashMap; pub enum Error { #[error("Network token contract error: {0}")] NetworkTokenContract(#[from] network_token::Error), - #[error("Chunk payments contract error: {0}")] - ChunkPaymentsContract(#[from] data_payments::error::Error), + #[error("Data payments contract error: {0}")] + DataPaymentsContract(#[from] data_payments::error::Error), } -#[allow(dead_code)] /// Approve an address / smart contract to spend this wallet's payment tokens. /// /// Returns the transaction calldata (input, to). @@ -36,7 +35,6 @@ pub fn approve_to_spend_tokens_calldata( network_token.approve_calldata(spender, value) } -#[allow(dead_code)] /// Transfer payment tokens from the supplied wallet to an address. /// /// Returns the transaction calldata (input, to). @@ -50,7 +48,6 @@ pub fn transfer_tokens_calldata( network_token.transfer_calldata(receiver, amount) } -#[allow(dead_code)] pub struct PayForQuotesCalldataReturnType { pub batched_calldata_map: HashMap>, pub to: Address, @@ -58,7 +55,6 @@ pub struct PayForQuotesCalldataReturnType { pub approve_amount: Amount, } -#[allow(dead_code)] /// Use this wallet to pay for chunks in batched transfer transactions. /// If the amount of transfers is more than one transaction can contain, the transfers will be split up over multiple transactions. 
/// From f0ade29bb809ba4f1eff0e7ee79f3b84ec341517 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 16:12:53 +0200 Subject: [PATCH 034/128] feat(autonomi): expose archive methods in wasm --- autonomi/src/client/wasm.rs | 56 +++++++++++++++++++++++++--------- autonomi/tests-js/index.js | 3 +- autonomi/tests-js/package.json | 2 +- 3 files changed, 44 insertions(+), 17 deletions(-) diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index c12ebfc9a0..630dcbbb4c 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -79,33 +79,59 @@ mod archive { use std::{collections::HashMap, path::PathBuf}; use xor_name::XorName; + #[wasm_bindgen(js_name = Archive)] + pub struct JsArchive(Archive); + + #[wasm_bindgen(js_class = Archive)] + impl JsArchive { + #[wasm_bindgen(constructor)] + pub fn new() -> Self { + Self(Archive::new()) + } + + #[wasm_bindgen(js_name = addNewFile)] + pub fn add_new_file(&mut self, path: String, data_addr: String) -> Result<(), JsError> { + let path = PathBuf::from(path); + let data_addr = str_to_addr(&data_addr)?; + self.0.add_new_file(path, data_addr); + + Ok(()) + } + + #[wasm_bindgen(js_name = renameFile)] + pub fn rename_file(&mut self, old_path: String, new_path: String) -> Result<(), JsError> { + let old_path = PathBuf::from(old_path); + let new_path = PathBuf::from(new_path); + self.0.rename_file(&old_path, &new_path)?; + + Ok(()) + } + + #[wasm_bindgen] + pub fn map(&self) -> Result { + let files = serde_wasm_bindgen::to_value(self.0.map())?; + Ok(files) + } + } + #[wasm_bindgen(js_class = Client)] impl JsClient { #[wasm_bindgen(js_name = archiveGet)] - pub async fn archive_get(&self, addr: String) -> Result { + pub async fn archive_get(&self, addr: String) -> Result { let addr = str_to_addr(&addr)?; - let data = self.0.archive_get(addr).await?; + let archive = self.0.archive_get(addr).await?; + let archive = JsArchive(archive); - // To `Map` (JS) - let data = serde_wasm_bindgen::to_value(&data.map())?; - Ok(data.into()) + Ok(archive) } #[wasm_bindgen(js_name = archivePut)] pub async fn archive_put( &self, - map: JsValue, + archive: &JsArchive, wallet: &JsWallet, ) -> Result { - // From `Map` or `Iterable<[K, V]>` (JS) - let map: HashMap = serde_wasm_bindgen::from_value(map)?; - let mut archive = Archive::new(); - - for (path, (xorname, meta)) in map { - archive.add_file(path, xorname, meta); - } - - let addr = self.0.archive_put(archive, &wallet.0).await?; + let addr = self.0.archive_put(archive.0.clone(), &wallet.0).await?; Ok(addr_to_str(addr)) } diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js index 0e5edacf55..a44ae3892c 100644 --- a/autonomi/tests-js/index.js +++ b/autonomi/tests-js/index.js @@ -44,7 +44,8 @@ describe('autonomi', function () { it('puts data, creates archive and retrieves it', async () => { const data = randomData(32); const addr = await client.dataPut(data, wallet); - const archive = new Map([["foo", addr]]); + const archive = new atnm.Archive(); + archive.addNewFile("foo", addr); const archiveAddr = await client.archivePut(archive, wallet); const archiveFetched = await client.archiveGet(archiveAddr); diff --git a/autonomi/tests-js/package.json b/autonomi/tests-js/package.json index b33e6d0e30..6da24b1037 100644 --- a/autonomi/tests-js/package.json +++ b/autonomi/tests-js/package.json @@ -1,7 +1,7 @@ { "type": "module", "scripts": { - "serve": "http-server -a 127.0.0.1 ../" + "serve": "http-server -c-1 -a 127.0.0.1 ../" }, "devDependencies": { "chai": "^5.1.1", From 
8d3b07d7fcab162e38e78705c4e9b197b2d3e9c1 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 16:13:27 +0200 Subject: [PATCH 035/128] refactor(autonomi): remove index.html --- autonomi/README.md | 13 -------- autonomi/index.html | 75 --------------------------------------------- 2 files changed, 88 deletions(-) delete mode 100644 autonomi/index.html diff --git a/autonomi/README.md b/autonomi/README.md index babfa51eed..c067c97bb9 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -92,19 +92,6 @@ npm run serve Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press 'run'. -#### `index.html` - -There is also a simple `index.html` file that runs some JavaScript. - -Build the package (again with the env variables) and run a webserver, e.g. with Python: -```sh -wasm-pack build --dev --target=web autonomi -python -m http.server --directory=autonomi 8000 -``` - -Then visit `http://127.0.0.1:8000/` in your (modern) browser. - - ## Faucet (local) There is no faucet server, but instead you can use the `Deployer wallet private key` printed in the EVM node output to diff --git a/autonomi/index.html b/autonomi/index.html deleted file mode 100644 index 0353446683..0000000000 --- a/autonomi/index.html +++ /dev/null @@ -1,75 +0,0 @@ - - - - - - - - - - - - - - - - \ No newline at end of file From 901d5d4dc2246cf78cbecf88b965b6b5b990d98a Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 16:14:49 +0200 Subject: [PATCH 036/128] test(autonomi): fix browser wasm test --- autonomi/tests/wasm.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index 485193ea48..8f27576f06 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -21,15 +21,15 @@ wasm_bindgen_test_configure!(run_in_browser); async fn put() -> Result<(), Box> { enable_logging_wasm("sn_networking,autonomi,wasm"); - let client = Client::connect(&peers_from_env()?).await.unwrap(); + let client = Client::connect(&peers_from_env()?).await?; let wallet = get_funded_wallet(); + let data = gen_random_data(1024 * 1024 * 10); - let data = gen_random_data(1024 * 1024 * 2); // 2MiB - let addr = client.put(data.clone(), &wallet).await.unwrap(); + let addr = client.data_put(data.clone(), &wallet).await?; - sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(10)).await; - let data_fetched = client.get(addr).await.unwrap(); + let data_fetched = client.data_get(addr).await?; assert_eq!(data, data_fetched, "data fetched should match data put"); Ok(()) From 0fc6f23103bb39b6a6c41dba0684eb8e72fa0d90 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 16:28:04 +0200 Subject: [PATCH 037/128] refactor(autonomi): remove unused wasm imports --- autonomi/src/client/wasm.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 630dcbbb4c..56ebca582e 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -74,10 +74,8 @@ impl JsClient { mod archive { use super::*; - use crate::client::archive::Metadata; use crate::client::{address::str_to_addr, archive::Archive}; - use std::{collections::HashMap, path::PathBuf}; - use xor_name::XorName; + use std::path::PathBuf; #[wasm_bindgen(js_name = Archive)] pub struct JsArchive(Archive); From 2ad12d596eb81c091c8c56098354d3c8bbec6da1 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 18 Oct 2024 17:22:05 +0200 Subject: [PATCH 
038/128] feat(autonomi): expose user data in wasm --- autonomi/src/client/vault_user_data.rs | 25 ++++++++++ autonomi/src/client/wasm.rs | 63 ++++++++++++++++++++++++-- autonomi/tests-js/index.js | 17 +++++-- 3 files changed, 98 insertions(+), 7 deletions(-) diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs index 779cf023d9..a45a2adaea 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault_user_data.rs @@ -65,6 +65,31 @@ impl UserData { Self::default() } + /// Add an archive. Returning true if the archive was newly added. + pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> bool { + self.file_archives.insert(archive) + } + + /// Add a name for an archive. Returning the old archive if it existed. + pub fn add_file_archive_name( + &mut self, + archive: ArchiveAddr, + name: String, + ) -> Option<ArchiveAddr> { + self.file_archive_names.insert(name, archive) + } + + /// Remove an archive. Returning true if the archive was removed. + pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> bool { + // TODO: Should we also remove the name? + self.file_archives.remove(&archive) + } + + /// Remove an archive name. Returning the archive if it existed. + pub fn remove_file_archive_name(&mut self, name: String) -> Option<ArchiveAddr> { + self.file_archive_names.remove(&name) + } + /// To bytes pub fn to_bytes(&self) -> Result { let bytes = rmp_serde::to_vec(&self)?; diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 56ebca582e..6ac80e2558 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -4,9 +4,6 @@ use libp2p::Multiaddr; use wasm_bindgen::prelude::*; use super::address::{addr_to_str, str_to_addr}; use super::vault_user_data::UserData; -#[wasm_bindgen(js_name = UserData)] -pub struct JsUserData(UserData); - #[wasm_bindgen(js_name = Client)] pub struct JsClient(super::Client); @@ -141,6 +138,66 @@ mod vault { use super::*; use bls::SecretKey; + #[wasm_bindgen(js_name = UserData)] + pub struct JsUserData(UserData); + + #[wasm_bindgen(js_class = UserData)] + impl JsUserData { + #[wasm_bindgen(constructor)] + pub fn new() -> Self { + Self(UserData::new()) + } + + #[wasm_bindgen(js_name = addArchive)] + pub fn add_archive( + &mut self, + archive: String, + name: Option<String>, + ) -> Result<(), JsError> { + let archive = str_to_addr(&archive)?; + + // TODO: Log when archive already exists?
+ self.0.add_file_archive(archive); + + if let Some(name) = name { + if let Some(old_archive) = self.0.add_file_archive_name(archive, name.clone()) { + tracing::warn!( + "Overwriting archive stored as '{name}': {old_archive} -> {archive}" + ); + } + } + + Ok(()) + } + + #[wasm_bindgen(js_name = removeArchive)] + pub fn remove_archive(&mut self, archive: String) -> Result<(), JsError> { + let archive = str_to_addr(&archive)?; + self.0.remove_file_archive(archive); + + Ok(()) + } + + #[wasm_bindgen(js_name = removeArchiveName)] + pub fn remove_archive_name(&mut self, name: String) -> Result<(), JsError> { + let _archive_name = self.0.remove_file_archive_name(name); + + Ok(()) + } + + #[wasm_bindgen(js_name = archives)] + pub fn archives(&self) -> Result { + let archives = serde_wasm_bindgen::to_value(&self.0.file_archives)?; + Ok(archives) + } + + #[wasm_bindgen(js_name = archiveNames)] + pub fn archive_names(&self) -> Result { + let archives = serde_wasm_bindgen::to_value(&self.0.file_archive_names)?; + Ok(archives) + } + } + #[wasm_bindgen(js_class = Client)] impl JsClient { #[wasm_bindgen(js_name = getUserDataFromVault)] diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js index a44ae3892c..75b3b76312 100644 --- a/autonomi/tests-js/index.js +++ b/autonomi/tests-js/index.js @@ -53,13 +53,22 @@ describe('autonomi', function () { assert.deepEqual(archive, archiveFetched); }); - it('writes bytes to vault and fetches it', async () => { + it('writes archive to vault and fetches it', async () => { + const addr = "0000000000000000000000000000000000000000000000000000000000000000"; // Dummy data address const data = randomData(32); const secretKey = atnm.genSecretKey(); - await client.writeBytesToVault(data, wallet, secretKey); - const dataFetched = await client.fetchAndDecryptVault(secretKey); + const archive = new atnm.Archive(); + archive.addNewFile('foo', addr); + const archiveAddr = await client.archivePut(archive, wallet); + + const userData = new atnm.UserData(); + userData.addArchive(archiveAddr, 'foo'); + + await client.putUserDataToVault(userData, wallet, secretKey); + const userDataFetched = await client.getUserDataFromVault(secretKey); - assert.deepEqual(data, dataFetched); + assert.deepEqual(userDataFetched.archives(), userData.archives()); + assert.deepEqual(userDataFetched.archiveNames(), userData.archiveNames()); }); }); From 8538d307d5d227e2b8a3439fb3461b5de087f250 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 21 Oct 2024 11:06:47 +0200 Subject: [PATCH 039/128] feat(evmlib): add token allowance and approval functions & set contract approval to infinite --- evmlib/src/contract/network_token.rs | 14 ++++++++ evmlib/src/wallet.rs | 52 ++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index a6fad9243d..013d572037 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -75,6 +75,20 @@ where Ok(balance) } + /// See how many tokens are approved to be spent. + pub async fn allowance(&self, owner: Address, spender: Address) -> Result { + debug!("Getting allowance of owner: {owner} for spender: {spender}",); + let balance = self + .contract + .allowance(owner, spender) + .call() + .await + .inspect_err(|err| error!("Error getting allowance: {err:?}"))? + ._0; + debug!("Allowance of owner: {owner} for spender: {spender} is: {balance}"); + Ok(balance) + } + /// Approve spender to spend a raw amount of tokens.
pub async fn approve(&self, spender: Address, value: U256) -> Result { debug!("Approving spender to spend raw amt of tokens: {value}"); diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 8c4ec78298..b9504f69a1 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -70,12 +70,12 @@ impl Wallet { /// Returns the raw balance of payment tokens for this wallet. pub async fn balance_of_tokens(&self) -> Result { - balance_of_tokens(wallet_address(&self.wallet), &self.network).await + balance_of_tokens(self.address(), &self.network).await } /// Returns the raw balance of gas tokens for this wallet. pub async fn balance_of_gas_tokens(&self) -> Result { - balance_of_gas_tokens(wallet_address(&self.wallet), &self.network).await + balance_of_gas_tokens(self.address(), &self.network).await } /// Transfer a raw amount of payment tokens to another address. @@ -96,6 +96,20 @@ impl Wallet { transfer_gas_tokens(self.wallet.clone(), &self.network, to, amount).await } + /// See how many tokens of the owner may be spent by the spender. + pub async fn token_allowance(&self, spender: Address) -> Result { + token_allowance(&self.network, self.address(), spender).await + } + + /// Approve an address / smart contract to spend this wallet's payment tokens. + pub async fn approve_to_spend_tokens( + &self, + spender: Address, + amount: U256, + ) -> Result { + approve_to_spend_tokens(self.wallet.clone(), &self.network, spender, amount).await + } + /// Pays for a single quote. Returns transaction hash of the payment. pub async fn pay_for_quote( &self, @@ -188,8 +202,20 @@ pub async fn balance_of_gas_tokens( Ok(balance) } +/// See how many tokens of the owner may be spent by the spender. +pub async fn token_allowance( + network: &Network, + owner: Address, + spender: Address, +) -> Result { + debug!("Getting allowance for owner: {owner} and spender: {spender}",); + let provider = http_provider(network.rpc_url().clone()); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.allowance(owner, spender).await +} + /// Approve an address / smart contract to spend this wallet's payment tokens. -async fn approve_to_spend_tokens( +pub async fn approve_to_spend_tokens( wallet: EthereumWallet, network: &Network, spender: Address, @@ -250,16 +276,28 @@ pub async fn pay_for_quotes>( let mut tx_hashes_by_quote = BTreeMap::new(); - // Approve the contract to spend enough of the client's tokens. - approve_to_spend_tokens( - wallet.clone(), + // Check allowance + let allowance = token_allowance( network, + wallet_address(&wallet), *network.data_payments_address(), - total_amount, ) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + // TODO: Get rid of approvals altogether, by using permits or whatever.. + if allowance < total_amount { + // Approve the contract to spend all the client's tokens. 
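Before the approval call that follows, the allowance pattern these new wallet methods enable is easier to read in isolation. A hedged sketch with assumed import paths and a simplified error type, not the crate's exact signatures:

```rust
// Sketch of the check-then-approve flow adopted by `pay_for_quotes` below:
// only pay gas for an approval when the standing allowance cannot cover
// `total_amount`, then approve U256::MAX so later payments skip this step.
use evmlib::common::U256; // assumed re-export path
use evmlib::wallet::Wallet;
use evmlib::Network;

async fn ensure_allowance(
    wallet: &Wallet,
    network: &Network,
    total_amount: U256,
) -> Result<(), Box<dyn std::error::Error>> {
    let spender = *network.data_payments_address();
    if wallet.token_allowance(spender).await? < total_amount {
        wallet.approve_to_spend_tokens(spender, U256::MAX).await?;
    }
    Ok(())
}
```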
+        approve_to_spend_tokens(
+            wallet.clone(),
+            network,
+            *network.data_payments_address(),
+            U256::MAX,
+        )
+        .await
+        .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?;
+    }
+
     let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
     let data_payments = DataPaymentsHandler::new(*network.data_payments_address(), provider);

From 6c32173ab41d669781db6b672b72fb1db7f5d6a2 Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Mon, 21 Oct 2024 13:13:35 +0200
Subject: [PATCH 040/128] refactor(autonomi): move `new` to top of impl

And move conversion to/from bytes to the bottom

---
 autonomi/src/client/archive.rs | 42 +++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
index 17055e0682..aa5301cfeb 100644
--- a/autonomi/src/client/archive.rs
+++ b/autonomi/src/client/archive.rs
@@ -68,19 +68,12 @@ impl Default for Metadata {
 }
 
 impl Archive {
-    /// Deserialize from bytes.
-    pub fn from_bytes(data: Bytes) -> Result {
-        let root: Archive = rmp_serde::from_slice(&data[..])?;
-
-        Ok(root)
-    }
-
-    /// Serialize to bytes.
-    pub fn into_bytes(&self) -> Result {
-        let root_serialized = rmp_serde::to_vec(&self)?;
-        let root_serialized = Bytes::from(root_serialized);
-
-        Ok(root_serialized)
+    /// Create a new empty local archive
+    /// Note that this does not upload the archive to the network
+    pub fn new() -> Self {
+        Self {
+            map: HashMap::new(),
+        }
     }
 
     /// Rename a file in an archive
@@ -99,14 +92,6 @@ impl Archive {
         Ok(())
     }
 
-    /// Create a new emtpy local archive
-    /// Note that this does not upload the archive to the network
-    pub fn new() -> Self {
-        Self {
-            map: HashMap::new(),
-        }
-    }
-
     /// Add a file to a local archive
     /// Note that this does not upload the archive to the network
     pub fn add_file(&mut self, path: PathBuf, data_addr: DataAddr, meta: Metadata) {
@@ -144,6 +129,21 @@ impl Archive {
     pub fn map(&self) -> &HashMap {
         &self.map
     }
+
+    /// Deserialize from bytes.
+    pub fn from_bytes(data: Bytes) -> Result {
+        let root: Archive = rmp_serde::from_slice(&data[..])?;
+
+        Ok(root)
+    }
+
+    /// Serialize to bytes.
+    pub fn into_bytes(&self) -> Result {
+        let root_serialized = rmp_serde::to_vec(&self)?;
+        let root_serialized = Bytes::from(root_serialized);
+
+        Ok(root_serialized)
+    }
 }
 
 impl Client {

From 3fff7e3a5862f19822f48d731be9f41900b95a19 Mon Sep 17 00:00:00 2001
From: qima
Date: Mon, 21 Oct 2024 18:29:17 +0800
Subject: [PATCH 041/128] feat(node): wipe storage_dir when restarting for a
 new network

---
 sn_networking/src/driver.rs | 116 +++++++++++++++++++++++++++++++++++-
 1 file changed, 115 insertions(+), 1 deletion(-)

diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index a895655650..1ad9c3e7a9 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -62,6 +62,8 @@ use sn_registers::SignedRegister;
 use std::{
     collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque},
     fmt::Debug,
+    fs,
+    io::{Read, Write},
     net::SocketAddr,
     path::PathBuf,
 };
@@ -359,9 +361,19 @@ impl NetworkBuilder {
             .set_provider_publication_interval(None);
 
         let store_cfg = {
+            let storage_dir_path = root_dir.join("record_store");
+            // In case the node instance is restarted for a different version of the network,
+            // the previous storage folder shall be wiped out,
+            // to avoid bringing old data into the new network.
+            check_and_wipe_storage_dir_if_necessary(
+                root_dir.clone(),
+                storage_dir_path.clone(),
+                get_key_version_str(),
+            )?;
+
             // Configures the disk_store to store records under the provided path and increase the max record size
             // The storage dir is suffixed with key_version str to avoid bringing records from old network into new
-            let storage_dir_path = root_dir.join(format!("record_store_{}", get_key_version_str()));
+
             if let Err(error) = std::fs::create_dir_all(&storage_dir_path) {
                 return Err(NetworkError::FailedToCreateRecordStoreDir {
                     path: storage_dir_path,
@@ -707,6 +719,45 @@ impl NetworkBuilder {
     }
 }
 
+fn check_and_wipe_storage_dir_if_necessary(
+    root_dir: PathBuf,
+    storage_dir_path: PathBuf,
+    cur_version_str: String,
+) -> Result<()> {
+    let mut prev_version_str = String::new();
+    let version_file = root_dir.join("network_key_version");
+    {
+        match fs::File::open(version_file.clone()) {
+            Ok(mut file) => {
+                file.read_to_string(&mut prev_version_str)?;
+            }
+            Err(err) => {
+                warn!("Failed in accessing version file {version_file:?}: {err:?}");
+                // Assuming file was not created yet
+                info!("Creating a new version file at {version_file:?}");
+                fs::File::create(version_file.clone())?;
+            }
+        }
+    }
+
+    // In case of version mismatch:
+    //   * the storage_dir shall be wiped out
+    //   * the version file shall be updated
+    if cur_version_str != prev_version_str {
+        warn!("Trying to wipe out storage dir {storage_dir_path:?}, as cur_version {cur_version_str:?} doesn't match prev_version {prev_version_str:?}");
+        let _ = fs::remove_dir_all(storage_dir_path);
+
+        let mut file = fs::OpenOptions::new()
+            .write(true)
+            .truncate(true)
+            .open(version_file.clone())?;
+        info!("Writing cur_version {cur_version_str:?} into version file at {version_file:?}");
+        file.write_all(cur_version_str.as_bytes())?;
+    }
+
+    Ok(())
+}
+
 pub struct SwarmDriver {
     pub(crate) swarm: Swarm,
     pub(crate) self_peer_id: PeerId,
@@ -1068,3 +1119,66 @@ impl SwarmDriver {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::check_and_wipe_storage_dir_if_necessary;
+
+    use std::{fs, io::Read};
+
+    #[tokio::test]
+    async fn version_file_update() {
+        let temp_dir = std::env::temp_dir();
+        let unique_dir_name = uuid::Uuid::new_v4().to_string();
+        let root_dir = temp_dir.join(unique_dir_name);
+        fs::create_dir_all(&root_dir).expect("Failed to create root directory");
+
+        let version_file = root_dir.join("network_key_version");
+        let storage_dir = root_dir.join("record_store");
+
+        let cur_version = uuid::Uuid::new_v4().to_string();
+        assert!(check_and_wipe_storage_dir_if_necessary(
+            root_dir.clone(),
+            storage_dir.clone(),
+            cur_version.clone()
+        )
+        .is_ok());
+        {
+            let mut content_str = String::new();
+            let mut file = fs::OpenOptions::new()
+                .read(true)
+                .open(version_file.clone())
+                .expect("Failed to open version file");
+            file.read_to_string(&mut content_str)
+                .expect("Failed to read from version file");
+            assert_eq!(content_str, cur_version);
+
+            drop(file);
+        }
+
+        fs::create_dir_all(&storage_dir).expect("Failed to create storage directory");
+        assert!(fs::metadata(storage_dir.clone()).is_ok());
+
+        let cur_version = uuid::Uuid::new_v4().to_string();
+        assert!(check_and_wipe_storage_dir_if_necessary(
+            root_dir.clone(),
+            storage_dir.clone(),
+            cur_version.clone()
+        )
+        .is_ok());
+        {
+            let mut content_str = String::new();
+            let mut file = fs::OpenOptions::new()
+                .read(true)
+                .open(version_file.clone())
+                .expect("Failed to open version file");
+            file.read_to_string(&mut content_str)
+                .expect("Failed to read from
version file"); + assert_eq!(content_str, cur_version); + + drop(file); + } + // The storage_dir shall be removed as version_key changed + assert!(fs::metadata(storage_dir.clone()).is_err()); + } +} From dc2ae9750656be1a6ba62f62375823b624df7db0 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 21 Oct 2024 16:01:06 +0200 Subject: [PATCH 042/128] refactor(autonomi): user data refactor --- autonomi/src/client/vault_user_data.rs | 30 +++++------- autonomi/src/client/wasm.rs | 63 ++++++++++---------------- autonomi/tests-js/index.js | 11 ++--- 3 files changed, 39 insertions(+), 65 deletions(-) diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs index a45a2adaea..6533c738dc 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault_user_data.rs @@ -37,13 +37,11 @@ pub struct UserData { pub register_sk: Option, /// Owned register addresses pub registers: HashSet, - /// Owned file archive addresses - pub file_archives: HashSet, + /// Owned file archive addresses, along with an optional name for that archive + pub file_archives: HashMap>, /// Owner register names, providing it is optional pub register_names: HashMap, - /// Owned file archive addresses along with a name for that archive providing it is optional - pub file_archive_names: HashMap, } /// Errors that can occur during the get operation. @@ -65,31 +63,25 @@ impl UserData { Self::default() } - /// Add an archive. Returning true if the archive was newly added. - pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> bool { - self.file_archives.insert(archive) + /// Add an archive. Returning `Some` (with the optional old name) if the archive was already in the set. + pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> Option> { + self.file_archives.insert(archive, None) } - /// Add a name for an archive. Returning the old archive if it existed. - pub fn add_file_archive_name( + /// Add an archive. Returning `Some` (with the optional old name) if the archive was already in the set. + pub fn add_file_archive_with_name( &mut self, archive: ArchiveAddr, name: String, - ) -> Option { - self.file_archive_names.insert(name, archive) + ) -> Option> { + self.file_archives.insert(archive, Some(name)) } - /// Remove an archive. Returning true if the archive was removed. - pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> bool { - // TODO: Should we also remove the name? + /// Remove an archive. Returning `Some` (with the optional old name) if the archive was in the set. + pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> Option> { self.file_archives.remove(&archive) } - /// Remove a archive name. Returning the archive if it existed. - pub fn remove_file_archive_name(&mut self, name: String) -> Option { - self.file_archive_names.remove(&name) - } - /// To bytes pub fn to_bytes(&self) -> Result { let bytes = rmp_serde::to_vec(&self)?; diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 6ac80e2558..bae5cc8eba 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -148,54 +148,40 @@ mod vault { Self(UserData::new()) } - #[wasm_bindgen(js_name = addArchive)] - pub fn add_archive( + #[wasm_bindgen(js_name = addFileArchive)] + pub fn add_file_archive( &mut self, archive: String, name: Option, ) -> Result<(), JsError> { let archive = str_to_addr(&archive)?; - // TODO: Log when archive already exists? 
- self.0.add_file_archive(archive); + let old_name = if let Some(ref name) = name { + self.0.add_file_archive_with_name(archive, name.clone()) + } else { + self.0.add_file_archive(archive) + }; - if let Some(name) = name { - if let Some(old_archive) = self.0.add_file_archive_name(archive, name.clone()) { - tracing::warn!( - "Overwriting archive stored as '{name}': {old_archive} -> {archive}" - ); - } + if let Some(old_name) = old_name { + tracing::warn!("Overwriting archive (`{archive}`): `{old_name:?}` -> `{name:?}`"); } Ok(()) } - #[wasm_bindgen(js_name = removeArchive)] - pub fn remove_archive(&mut self, archive: String) -> Result<(), JsError> { + #[wasm_bindgen(js_name = removeFileArchive)] + pub fn remove_file_archive(&mut self, archive: String) -> Result<(), JsError> { let archive = str_to_addr(&archive)?; self.0.remove_file_archive(archive); Ok(()) } - #[wasm_bindgen(js_name = removeArchiveName)] - pub fn remove_archive_name(&mut self, name: String) -> Result<(), JsError> { - let _archive_name = self.0.remove_file_archive_name(name); - - Ok(()) - } - - #[wasm_bindgen(js_name = archives)] - pub fn archives(&self) -> Result { + #[wasm_bindgen(js_name = fileArchives)] + pub fn file_archives(&self) -> Result { let archives = serde_wasm_bindgen::to_value(&self.0.file_archives)?; Ok(archives) } - - #[wasm_bindgen(js_name = archiveNames)] - pub fn archive_names(&self) -> Result { - let archives = serde_wasm_bindgen::to_value(&self.0.file_archive_names)?; - Ok(archives) - } } #[wasm_bindgen(js_class = Client)] @@ -203,12 +189,9 @@ mod vault { #[wasm_bindgen(js_name = getUserDataFromVault)] pub async fn get_user_data_from_vault( &self, - secret_key: Vec, + secret_key: &SecretKeyJs, ) -> Result { - let secret_key: [u8; 32] = secret_key[..].try_into()?; - let secret_key = SecretKey::from_bytes(secret_key)?; - - let user_data = self.0.get_user_data_from_vault(&secret_key).await?; + let user_data = self.0.get_user_data_from_vault(&secret_key.0).await?; Ok(JsUserData(user_data)) } @@ -216,15 +199,12 @@ mod vault { #[wasm_bindgen(js_name = putUserDataToVault)] pub async fn put_user_data_to_vault( &self, - user_data: JsUserData, + user_data: &JsUserData, wallet: &JsWallet, - secret_key: Vec, + secret_key: &SecretKeyJs, ) -> Result<(), JsError> { - let secret_key: [u8; 32] = secret_key[..].try_into()?; - let secret_key = SecretKey::from_bytes(secret_key)?; - self.0 - .put_user_data_to_vault(&secret_key, &wallet.0, user_data.0) + .put_user_data_to_vault(&secret_key.0, &wallet.0, user_data.0.clone()) .await?; Ok(()) @@ -232,10 +212,13 @@ mod vault { } } +#[wasm_bindgen(js_name = SecretKey)] +pub struct SecretKeyJs(bls::SecretKey); + #[wasm_bindgen(js_name = genSecretKey)] -pub fn gen_secret_key() -> Vec { +pub fn gen_secret_key() -> SecretKeyJs { let secret_key = bls::SecretKey::random(); - secret_key.to_bytes().to_vec() + SecretKeyJs(secret_key) } #[wasm_bindgen(js_name = Wallet)] diff --git a/autonomi/tests-js/index.js b/autonomi/tests-js/index.js index 75b3b76312..1dd1dffac0 100644 --- a/autonomi/tests-js/index.js +++ b/autonomi/tests-js/index.js @@ -63,12 +63,11 @@ describe('autonomi', function () { const archiveAddr = await client.archivePut(archive, wallet); const userData = new atnm.UserData(); - userData.addArchive(archiveAddr, 'foo'); + userData.addFileArchive(archiveAddr, 'foo'); - await client.putUserDataToVault(data, wallet, secretKey); - const userDataFetched = await client.put_user_data_to_vault(secretKey); - - assert.deepEqual(userDataFetched.archives(), userData.archives()); - 
assert.deepEqual(userDataFetched.archiveNames(), userData.archiveNames()); + await client.putUserDataToVault(userData, wallet, secretKey); + const userDataFetched = await client.getUserDataFromVault(secretKey); + + assert.deepEqual(userDataFetched.fileArchives(), userData.fileArchives()); }); }); From 284608d887d06625e487834f4749eccf586a7cc7 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 22 Oct 2024 08:54:40 +0200 Subject: [PATCH 043/128] refactor(autonomi): use empty string as default --- autonomi/src/client/vault_user_data.rs | 28 +++++++++++--------------- autonomi/src/client/wasm.rs | 4 +++- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault_user_data.rs index 6533c738dc..e40e73e260 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault_user_data.rs @@ -7,7 +7,6 @@ // permissions and limitations relating to use of the SAFE Network Software. use std::collections::HashMap; -use std::collections::HashSet; use super::archive::ArchiveAddr; use super::data::GetError; @@ -35,13 +34,10 @@ pub static USER_DATA_VAULT_CONTENT_IDENTIFIER: LazyLock = pub struct UserData { /// The register secret key hex encoded pub register_sk: Option, - /// Owned register addresses - pub registers: HashSet, - /// Owned file archive addresses, along with an optional name for that archive - pub file_archives: HashMap>, - - /// Owner register names, providing it is optional - pub register_names: HashMap, + /// Owned register addresses, along with their names (can be empty) + pub registers: HashMap, + /// Owned file archive addresses, along with their names (can be empty) + pub file_archives: HashMap, } /// Errors that can occur during the get operation. @@ -63,22 +59,22 @@ impl UserData { Self::default() } - /// Add an archive. Returning `Some` (with the optional old name) if the archive was already in the set. - pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> Option> { - self.file_archives.insert(archive, None) + /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. + pub fn add_file_archive(&mut self, archive: ArchiveAddr) -> Option { + self.file_archives.insert(archive, "".into()) } - /// Add an archive. Returning `Some` (with the optional old name) if the archive was already in the set. + /// Add an archive. Returning `Option::Some` with the old name if the archive was already in the set. pub fn add_file_archive_with_name( &mut self, archive: ArchiveAddr, name: String, - ) -> Option> { - self.file_archives.insert(archive, Some(name)) + ) -> Option { + self.file_archives.insert(archive, name) } - /// Remove an archive. Returning `Some` (with the optional old name) if the archive was in the set. - pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> Option> { + /// Remove an archive. Returning `Option::Some` with the old name if the archive was already in the set. 
+    pub fn remove_file_archive(&mut self, archive: ArchiveAddr) -> Option {
         self.file_archives.remove(&archive)
     }
 
diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs
index bae5cc8eba..d4dd7cf674 100644
--- a/autonomi/src/client/wasm.rs
+++ b/autonomi/src/client/wasm.rs
@@ -163,7 +163,9 @@ mod vault {
         };
 
         if let Some(old_name) = old_name {
-            tracing::warn!("Overwriting archive (`{archive}`): `{old_name:?}` -> `{name:?}`");
+            tracing::warn!(
+                "Changing name of archive `{archive}` from `{old_name:?}` to `{name:?}`"
+            );
         }
 
         Ok(())

From 28ca53efe4cda3c3ad11fa64e10953913ec1a8e2 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Sat, 19 Oct 2024 17:42:35 +0530
Subject: [PATCH 044/128] chore: accumulate register at the network level
 during split

- If we don't handle a split register at the lower level, then we will keep
  retrying until the GET retries run out
- And it is up to the caller to make sure to merge the split record, which is
  hard to perform at every caller, e.g., replication fetch, PUT verification
  etc.

---
 autonomi/src/client/registers.rs |  20 +---
 sn_networking/src/error.rs       |  30 +++---
 sn_networking/src/lib.rs         | 159 ++++++++++++++++++++++++-------
 sn_node/src/put_validation.rs    |   2 +
 4 files changed, 145 insertions(+), 66 deletions(-)

diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 611153a588..52f8944e1e 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -147,23 +147,11 @@ impl Client {
                     try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?;
                 signed_reg
             }
-            // manage forked register case
             Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
-                debug!("Forked register detected for {address:?} merging forks");
-                let mut registers: Vec = vec![];
-                for (_, (record, _)) in result_map {
-                    registers.push(
-                        try_deserialize_record(&record)
-                            .map_err(|_| RegisterError::Serialization)?,
-                    );
-                }
-                let register = registers.iter().fold(registers[0].clone(), |mut acc, x| {
-                    if let Err(e) = acc.merge(x) {
-                        warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address());
-                    }
-                    acc
-                });
-                register
+                error!("Got split record error for register at address: {address}. This should've been handled at the network layer");
+                Err(RegisterError::Network(NetworkError::GetRecordError(
+                    GetRecordError::SplitRecord { result_map },
+                )))?
             }
             Err(e) => {
                 error!("Failed to get register {address:?} from network: {e}");

diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs
index 99bf1fbe92..c767ef8ab1 100644
--- a/sn_networking/src/error.rs
+++ b/sn_networking/src/error.rs
@@ -36,21 +36,22 @@ pub enum GetRecordError {
         got: usize,
         range: u32,
     },
-
+    #[error("Network query timed out")]
+    QueryTimeout,
+    #[error("Record retrieved from the network does not match the provided target record.")]
+    RecordDoesNotMatch(Record),
+    #[error("The record kind for the split records did not match")]
+    RecordKindMismatch,
     #[error("Record not found in the network")]
     RecordNotFound,
-
-    // Avoid logging the whole `Record` content by accident
+    // Avoid logging the whole `Record` content by accident.
+    /// The split record error will be handled at the network layer.
+    /// For spends, it accumulates the spends and returns a double spend error if more than one.
+    /// For registers, it merges the registers and returns the merged record.
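The `SplitRecord` variant this comment annotates follows. Seen from a caller, the new contract is that registers arrive pre-merged while split spends surface as a double-spend error; a hedged sketch with assumed re-export paths, where the `process_*` helpers are hypothetical:

```rust
use libp2p::kad::{Record, RecordKey};
use sn_networking::{GetRecordCfg, GetRecordError, Network, NetworkError};
use sn_transfers::SignedSpend;

fn process_record(_record: Record) {} // hypothetical helper
fn process_double_spend(_spends: Vec<SignedSpend>) {} // hypothetical helper

async fn fetch(network: &Network, key: RecordKey, cfg: &GetRecordCfg) -> Result<(), NetworkError> {
    match network.get_record_from_network(key, cfg).await {
        // Registers: forks are merged inside the network layer now, so a
        // single (merged) record comes back here.
        Ok(record) => Ok(process_record(record)),
        // Spends: a split resolves into a double-spend error instead.
        Err(NetworkError::DoubleSpendAttempt(spends)) => Ok(process_double_spend(spends)),
        // Record kinds the network layer does not accumulate can still split.
        Err(err @ NetworkError::GetRecordError(GetRecordError::SplitRecord { .. })) => Err(err),
        Err(other) => Err(other),
    }
}
```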
#[error("Split Record has {} different copies", result_map.len())] SplitRecord { result_map: HashMap)>, }, - - #[error("Network query timed out")] - QueryTimeout, - - #[error("Record retrieved from the network does not match the provided target record.")] - RecordDoesNotMatch(Record), } impl Debug for GetRecordError { @@ -70,11 +71,6 @@ impl Debug for GetRecordError { .field("range", &range) .finish() } - Self::RecordNotFound => write!(f, "RecordNotFound"), - Self::SplitRecord { result_map } => f - .debug_struct("SplitRecord") - .field("result_map_count", &result_map.len()) - .finish(), Self::QueryTimeout => write!(f, "QueryTimeout"), Self::RecordDoesNotMatch(record) => { let pretty_key = PrettyPrintRecordKey::from(&record.key); @@ -82,6 +78,12 @@ impl Debug for GetRecordError { .field(&pretty_key) .finish() } + Self::RecordKindMismatch => write!(f, "RecordKindMismatch"), + Self::RecordNotFound => write!(f, "RecordNotFound"), + Self::SplitRecord { result_map } => f + .debug_struct("SplitRecord") + .field("result_map_count", &result_map.len()) + .finish(), } } } diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 1ab832f8a6..e47f593838 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -30,6 +30,7 @@ mod transfers; mod transport; use cmd::LocalSwarmCmd; +use sn_registers::SignedRegister; use xor_name::XorName; // re-export arch dependent deps for use in the crate, or above @@ -61,11 +62,15 @@ use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress}; use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, - storage::{RecordType, RetryStrategy}, + storage::{ + try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType, + RetryStrategy, + }, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; +use sn_transfers::SignedSpend; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, net::IpAddr, sync::Arc, }; @@ -671,16 +676,15 @@ impl Network { /// In case a target_record is provided, only return when fetched target. /// Otherwise count it as a failure when all attempts completed. /// + /// It also handles the split record error for spends and registers. + /// For spends, it accumulates the spends and returns an error if more than one. + /// For registers, it merges the registers and returns the merged record. #[cfg(not(target_arch = "wasm32"))] pub async fn get_record_from_network( &self, key: RecordKey, cfg: &GetRecordCfg, ) -> Result { - use std::collections::BTreeSet; - - use sn_transfers::SignedSpend; - let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( backoff::ExponentialBackoff { @@ -720,39 +724,16 @@ impl Network { Err(GetRecordError::RecordNotFound) => { warn!("No holder of record '{pretty_key:?}' found."); } + // This is returned during SplitRecordError, we should not get this error here. + Err(GetRecordError::RecordKindMismatch) => { + error!("Record kind mismatch for {pretty_key:?}. 
This error should not happen here."); + } Err(GetRecordError::SplitRecord { result_map }) => { error!("Encountered a split record for {pretty_key:?}."); - - // attempt to deserialise and accumulate any spends - let mut accumulated_spends = BTreeSet::new(); - let results_count = result_map.len(); - // try and accumulate any SpendAttempts - if results_count > 1 { - info!("For record {pretty_key:?}, we have more than one result returned."); - // Allow for early bail if we've already seen a split SpendAttempt - for (record, _) in result_map.values() { - match get_raw_signed_spends_from_record(record) { - Ok(spends) => { - accumulated_spends.extend(spends); - } - Err(_) => { - continue; - } - } - } + if let Some(record) = Self::handle_split_record_error(result_map, &key)? { + info!("Merged the split record (register) for {pretty_key:?}, into a single record"); + return Ok(record); } - - // we have a Double SpendAttempt and will exit - if accumulated_spends.len() > 1 { - info!("For record {pretty_key:?} task found split record for a spend, accumulated and sending them as a single record"); - let accumulated_spends = - accumulated_spends.into_iter().collect::>(); - - return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt( - accumulated_spends, - ))); - } - } Err(GetRecordError::QueryTimeout) => { error!("Encountered query timeout for {pretty_key:?}."); @@ -777,6 +758,111 @@ impl Network { .await } + /// Handle the split record error. + /// Spend: Accumulate spends and return error if more than one. + /// Register: Merge registers and return the merged record. + fn handle_split_record_error( + result_map: &HashMap)>, + key: &RecordKey, + ) -> std::result::Result, backoff::Error> { + let pretty_key = PrettyPrintRecordKey::from(key); + + // attempt to deserialise and accumulate any spends or registers + let results_count = result_map.len(); + let mut accumulated_spends = HashSet::new(); + let mut collected_registers = Vec::new(); + + if results_count > 1 { + let mut record_kind = None; + info!("For record {pretty_key:?}, we have more than one result returned."); + for (record, _) in result_map.values() { + let Ok(header) = RecordHeader::from_record(record) else { + continue; + }; + let kind = record_kind.get_or_insert(header.kind); + if *kind != header.kind { + error!("Encountered a split record for {pretty_key:?} with different RecordHeaders. Expected {kind:?} but got {:?}",header.kind); + return Err(backoff::Error::Permanent(NetworkError::GetRecordError( + GetRecordError::RecordKindMismatch, + ))); + } + + // Accumulate the spends + if kind == &RecordKind::Spend { + info!("For record {pretty_key:?}, we have a split record for a spend attempt. Accumulating spends"); + + match get_raw_signed_spends_from_record(record) { + Ok(spends) => { + accumulated_spends.extend(spends); + } + Err(_) => { + continue; + } + } + } + // Accumulate the registers + else if kind == &RecordKind::Register { + info!("For record {pretty_key:?}, we have a split record for a register. Accumulating registers"); + let Ok(register) = try_deserialize_record::(record) else { + error!( + "Failed to deserialize register {pretty_key}. Skipping accumulation" + ); + continue; + }; + + match register.verify() { + Ok(_) => { + collected_registers.push(register); + } + Err(_) => { + error!( + "Failed to verify register for {pretty_key} at address: {}. 
Skipping accumulation", + register.address() + ); + continue; + } + } + } + } + } + + // Allow for early bail if we've already seen a split SpendAttempt + if accumulated_spends.len() > 1 { + info!("For record {pretty_key:?} task found split record for a spend, accumulated and sending them as a single record"); + let accumulated_spends = accumulated_spends.into_iter().collect::>(); + + return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt( + accumulated_spends, + ))); + } else if !collected_registers.is_empty() { + info!("For record {pretty_key:?} task found multiple registers, merging them."); + let signed_register = collected_registers.iter().fold(collected_registers[0].clone(), |mut acc, x| { + if let Err(e) = acc.merge(x) { + warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address()); + } + acc + }); + + let record_value = try_serialize_record(&signed_register, RecordKind::Register) + .map_err(|err| { + error!( + "Error while serializing the merged register for {pretty_key:?}: {err:?}" + ); + backoff::Error::Permanent(NetworkError::from(err)) + })? + .to_vec(); + + let record = Record { + key: key.clone(), + value: record_value, + publisher: None, + expires: None, + }; + return Ok(Some(record)); + } + Ok(None) + } + /// Get the cost of storing the next record from the network pub async fn get_local_storecost( &self, @@ -852,6 +938,7 @@ impl Network { ); self.put_record_once(record.clone(), cfg).await.map_err(|err| { + // FIXME: Skip if we get a permanent error during verification, e.g., DoubleSpendAttempt warn!("Failed to PUT record with key: {pretty_key:?} to network (retry via backoff) with error: {err:?}"); if cfg.retry_strategy.is_some() { diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index b0dd3f6857..94a1e2aebb 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -743,6 +743,8 @@ impl Node { // get spends from the network at the address for that unique pubkey let network_spends = match self.network().get_raw_spends(spend_addr).await { Ok(spends) => spends, + // Fixme: We don't return SplitRecord Error for spends, instead we return NetworkError::DoubleSpendAttempt. + // The fix should also consider/change all the places we try to get spends, for eg `get_raw_signed_spends_from_record` etc. 
Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { warn!("Got a split record (double spend) for {unique_pubkey:?} from the network"); let mut spends = vec![]; From c8cb53eca3309d763e0d1327ec806e046555dfd7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 22 Oct 2024 14:17:01 +0200 Subject: [PATCH 045/128] feat: added metamask wasm bindings and js example --- autonomi/Cargo.toml | 1 + autonomi/README.md | 32 +++++-- autonomi/examples/metamask/index.html | 26 ++++++ autonomi/examples/metamask/index.js | 114 +++++++++++++++++++++++++ autonomi/src/client/external_signer.rs | 14 ++- autonomi/src/client/utils.rs | 20 +++-- autonomi/src/client/wasm.rs | 85 ++++++++++++++++++ autonomi/src/utils.rs | 7 +- evmlib/src/external_signer.rs | 6 +- sn_evm/src/lib.rs | 1 + 10 files changed, 282 insertions(+), 24 deletions(-) create mode 100644 autonomi/examples/metamask/index.html create mode 100644 autonomi/examples/metamask/index.js diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index b247597b8f..2ec971c006 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -75,6 +75,7 @@ js-sys = "0.3.70" test_utils = { path = "../test_utils" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-web = "0.1.3" +xor_name = { version = "5.0.0", features = ["serialize-hex"] } [lints] workspace = true diff --git a/autonomi/README.md b/autonomi/README.md index c067c97bb9..5b95af38e4 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -26,7 +26,7 @@ autonomi = { path = "../autonomi", version = "0.1.0" } cargo run --bin evm_testnet ``` -3. Run a local network with the `local` feature and use the local evm node. +3. Run a local network with the `local` feature and use the local evm node. ```sh cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-local @@ -63,34 +63,56 @@ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo t ### WebAssembly To run a WASM test + - Install `wasm-pack` -- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you have `rustup`: `rustup target add wasm32-unknown-unknown`.) -- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, e.g. `/ip4//tcp//ws/p2p/`. +- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you + have `rustup`: `rustup target add wasm32-unknown-unknown`.) +- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, + e.g. `/ip4//tcp//ws/p2p/`. - As well as the other environment variables needed for EVM payments (e.g. `RPC_URL`). - Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only. Example: + ```sh SAFE_PEERS=/ip4//tcp//ws/p2p/ wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put ``` #### Test from JS in the browser -`wasm-pack test` does not execute JavaScript, but runs mostly WebAssembly. Again make sure the environment variables are set and build the JS package: +`wasm-pack test` does not execute JavaScript, but runs mostly WebAssembly. Again make sure the environment variables are +set and build the JS package: ```sh wasm-pack build --dev --target=web autonomi --features=vault ``` Then cd into `autonomi/tests-js`, and use `npm` to install and serve the test html file. + ``` cd autonomi/tests-js npm install npm run serve ``` -Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press 'run'. 
+Then go to `http://127.0.0.1:8080/tests-js` in the browser. Here, enter a `ws` multiaddr of a local node and press ' +run'. + +#### MetaMask example + +There is a MetaMask example for doing a simple put operation. + +Build the package with the `external-signer` feature (and again with the env variables) and run a webserver, e.g. with +Python: + +```sh +wasm-pack build --dev --target=web autonomi --features=external-signer +python -m http.server --directory=autonomi 8000 +``` + +Then visit `http://127.0.0.1:8000/examples/metamask` in your (modern) browser. +Here, enter a `ws` multiaddr of a local node and press 'run'. ## Faucet (local) diff --git a/autonomi/examples/metamask/index.html b/autonomi/examples/metamask/index.html new file mode 100644 index 0000000000..50844bd7f9 --- /dev/null +++ b/autonomi/examples/metamask/index.html @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/autonomi/examples/metamask/index.js b/autonomi/examples/metamask/index.js new file mode 100644 index 0000000000..862b433630 --- /dev/null +++ b/autonomi/examples/metamask/index.js @@ -0,0 +1,114 @@ +import init, * as autonomi from '../../pkg/autonomi.js'; + +export async function externalSignerPut(peerAddr) { + try { + // Check if MetaMask (window.ethereum) is available + if (typeof window.ethereum === 'undefined') { + throw new Error('MetaMask is not installed'); + } + + // Request account access from MetaMask + const accounts = await window.ethereum.request({method: 'eth_requestAccounts'}); + const sender = accounts[0]; // Get the first account + + // Setup API client + await init(); + + autonomi.logInit("autonomi=trace"); + + const client = await autonomi.Client.connect([peerAddr]); + + // Random bytes to be uploaded + const data = [...Array(16)].map(() => Math.floor(Math.random() * 9)); + + // Get quotes and payment information (this would need actual implementation) + const [quotes, quotePayments, free_chunks] = await client.getQuotes(data); + + // Get the EVM network + let evmNetwork = autonomi.getEvmNetwork(); + + // Form quotes payment calldata + const payForQuotesCalldata = autonomi.getPayForQuotesCalldata( + evmNetwork, + quotePayments + ); + + // Form approve to spend tokens calldata + const approveCalldata = autonomi.getApproveToSpendTokensCalldata( + evmNetwork, + payForQuotesCalldata.approve_spender, + payForQuotesCalldata.approve_amount + ); + + // Approve to spend tokens + await sendTransaction({ + from: sender, + to: approveCalldata[1], + data: approveCalldata[0] + }); + + let payments = {}; + + // Execute batched quote payment transactions + for (const [calldata, quoteHashes] of payForQuotesCalldata.batched_calldata_map) { + const txHash = await sendTransaction({ + from: sender, + to: payForQuotesCalldata.to, + data: calldata + }); + + // Record the transaction hashes for each quote + quoteHashes.forEach(quoteHash => { + payments[quoteHash] = txHash; + }); + } + + // Generate payment proof + const proof = autonomi.getPaymentProofFromQuotesAndPayments(quotes, payments); + + // Submit the data with proof of payment + const addr = await client.dataPutWithProof(data, proof); + + // Wait for a few seconds to allow data to propagate + await new Promise(resolve => setTimeout(resolve, 10000)); + + // Fetch the data back + const fetchedData = await client.dataGet(addr); + const originalData = new Uint8Array(data); + + if (fetchedData === originalData) { + console.log("Fetched data matches the original data!"); + } else { + throw new Error("Fetched data does not 
match original data!") + } + + console.log("Data successfully put and verified!"); + + } catch (error) { + console.error("An error occurred:", error); + } +} + +// Helper function to send a transaction through MetaMask using Ethereum JSON-RPC +async function sendTransaction({from, to, data}) { + const transactionParams = { + from: from, // Sender address + to: to, // Destination address + data: data, // Calldata (transaction input) + }; + + try { + // Send the transaction via MetaMask and get the transaction hash + const txHash = await window.ethereum.request({ + method: 'eth_sendTransaction', + params: [transactionParams] + }); + + console.log(`Transaction sent with hash: ${txHash}`); + return txHash; // Return the transaction hash + + } catch (error) { + console.error("Failed to send transaction:", error); + throw error; + } +} \ No newline at end of file diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs index 143e8340eb..b17002bd9c 100644 --- a/autonomi/src/client/external_signer.rs +++ b/autonomi/src/client/external_signer.rs @@ -3,8 +3,7 @@ use crate::client::utils::extract_quote_payments; use crate::self_encryption::encrypt; use crate::Client; use bytes::Bytes; -use sn_evm::{ProofOfPayment, QuotePayment}; -use sn_networking::PayeeQuote; +use sn_evm::{PaymentQuote, ProofOfPayment, QuotePayment}; use sn_protocol::storage::Chunk; use std::collections::HashMap; use xor_name::XorName; @@ -34,7 +33,7 @@ impl Client { data: Bytes, ) -> Result< ( - HashMap, + HashMap, Vec, Vec, ), @@ -42,7 +41,14 @@ impl Client { > { // Encrypt the data as chunks let (_data_map_chunk, _chunks, xor_names) = encrypt_data(data)?; - let cost_map = self.get_store_quotes(xor_names.into_iter()).await?; + + let cost_map: HashMap = self + .get_store_quotes(xor_names.into_iter()) + .await? + .into_iter() + .map(|(name, (_, _, q))| (name, q)) + .collect(); + let (quote_payments, free_chunks) = extract_quote_payments(&cost_map); Ok((cost_map, quote_payments, free_chunks)) } diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs index 33dde8d7b9..68ae70f2f7 100644 --- a/autonomi/src/client/utils.rs +++ b/autonomi/src/client/utils.rs @@ -12,7 +12,7 @@ use bytes::Bytes; use libp2p::kad::{Quorum, Record}; use rand::{thread_rng, Rng}; use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_evm::{EvmWallet, ProofOfPayment, QuotePayment}; +use sn_evm::{EvmWallet, PaymentQuote, ProofOfPayment, QuotePayment}; use sn_networking::{ GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind, }; @@ -149,7 +149,13 @@ impl Client { content_addrs: impl Iterator, wallet: &EvmWallet, ) -> Result<(HashMap, Vec), PayError> { - let cost_map = self.get_store_quotes(content_addrs).await?; + let cost_map = self + .get_store_quotes(content_addrs) + .await? + .into_iter() + .map(|(name, (_, _, q))| (name, q)) + .collect(); + let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map); // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying. @@ -227,20 +233,16 @@ async fn fetch_store_quote( /// Form to be executed payments and already executed payments from a cost map. 
pub(crate) fn extract_quote_payments( - cost_map: &HashMap, + cost_map: &HashMap, ) -> (Vec, Vec) { let mut to_be_paid = vec![]; let mut already_paid = vec![]; for (chunk_address, quote) in cost_map.iter() { - if quote.2.cost.is_zero() { + if quote.cost.is_zero() { already_paid.push(*chunk_address); } else { - to_be_paid.push(( - quote.2.hash(), - quote.2.rewards_address, - quote.2.cost.as_atto(), - )); + to_be_paid.push((quote.hash(), quote.rewards_address, quote.cost.as_atto())); } } diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index d4dd7cf674..c968acf640 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -214,6 +214,83 @@ mod vault { } } +#[cfg(feature = "external-signer")] +mod external_signer { + use super::*; + use crate::payment_proof_from_quotes_and_payments; + use sn_evm::external_signer::{approve_to_spend_tokens_calldata, pay_for_quotes_calldata}; + use sn_evm::EvmNetwork; + use sn_evm::ProofOfPayment; + use sn_evm::QuotePayment; + use sn_evm::{Amount, PaymentQuote}; + use sn_evm::{EvmAddress, QuoteHash, TxHash}; + use std::collections::{BTreeMap, HashMap}; + use wasm_bindgen::prelude::wasm_bindgen; + use wasm_bindgen::{JsError, JsValue}; + use xor_name::XorName; + + #[wasm_bindgen(js_class = Client)] + impl JsClient { + #[wasm_bindgen(js_name = getQuotes)] + pub async fn get_quotes_for_data(&self, data: Vec) -> Result { + let data = crate::Bytes::from(data); + let result = self.0.get_quotes_for_data(data).await?; + let js_value = serde_wasm_bindgen::to_value(&result)?; + Ok(js_value) + } + + #[wasm_bindgen(js_name = dataPutWithProof)] + pub async fn data_put_with_proof_of_payment( + &self, + data: Vec, + proof: JsValue, + ) -> Result { + let data = crate::Bytes::from(data); + let proof: HashMap = serde_wasm_bindgen::from_value(proof)?; + let xorname = self.0.data_put_with_proof_of_payment(data, proof).await?; + Ok(addr_to_str(xorname)) + } + } + + #[wasm_bindgen(js_name = getPayForQuotesCalldata)] + pub fn get_pay_for_quotes_calldata( + network: JsValue, + payments: JsValue, + ) -> Result { + let network: EvmNetwork = serde_wasm_bindgen::from_value(network)?; + let payments: Vec = serde_wasm_bindgen::from_value(payments)?; + let calldata = pay_for_quotes_calldata(&network, payments.into_iter())?; + let js_value = serde_wasm_bindgen::to_value(&calldata)?; + Ok(js_value) + } + + #[wasm_bindgen(js_name = getApproveToSpendTokensCalldata)] + pub fn get_approve_to_spend_tokens_calldata( + network: JsValue, + spender: JsValue, + amount: JsValue, + ) -> Result { + let network: EvmNetwork = serde_wasm_bindgen::from_value(network)?; + let spender: EvmAddress = serde_wasm_bindgen::from_value(spender)?; + let amount: Amount = serde_wasm_bindgen::from_value(amount)?; + let calldata = approve_to_spend_tokens_calldata(&network, spender, amount); + let js_value = serde_wasm_bindgen::to_value(&calldata)?; + Ok(js_value) + } + + #[wasm_bindgen(js_name = getPaymentProofFromQuotesAndPayments)] + pub fn get_payment_proof_from_quotes_and_payments( + quotes: JsValue, + payments: JsValue, + ) -> Result { + let quotes: HashMap = serde_wasm_bindgen::from_value(quotes)?; + let payments: BTreeMap = serde_wasm_bindgen::from_value(payments)?; + let proof = payment_proof_from_quotes_and_payments("es, &payments); + let js_value = serde_wasm_bindgen::to_value(&proof)?; + Ok(js_value) + } +} + #[wasm_bindgen(js_name = SecretKey)] pub struct SecretKeyJs(bls::SecretKey); @@ -233,6 +310,14 @@ pub fn funded_wallet() -> JsWallet { 
JsWallet(test_utils::evm::get_funded_wallet()) } +/// Get the current `EvmNetwork` that was set using environment variables that were used during the build process of this library. +#[wasm_bindgen(js_name = getEvmNetwork)] +pub fn evm_network() -> Result { + let evm_network = evmlib::utils::get_evm_network_from_env()?; + let js_value = serde_wasm_bindgen::to_value(&evm_network)?; + Ok(js_value) +} + /// Enable tracing logging in the console. /// /// A level could be passed like `trace` or `warn`. Or set for a specific module/crate diff --git a/autonomi/src/utils.rs b/autonomi/src/utils.rs index a7273f9bae..fc9ceb7718 100644 --- a/autonomi/src/utils.rs +++ b/autonomi/src/utils.rs @@ -1,15 +1,14 @@ -use sn_evm::{ProofOfPayment, QuoteHash, TxHash}; -use sn_networking::PayeeQuote; +use sn_evm::{PaymentQuote, ProofOfPayment, QuoteHash, TxHash}; use std::collections::{BTreeMap, HashMap}; use xor_name::XorName; pub fn payment_proof_from_quotes_and_payments( - quotes: &HashMap, + quotes: &HashMap, payments: &BTreeMap, ) -> HashMap { quotes .iter() - .filter_map(|(xor_name, (_, _, quote))| { + .filter_map(|(xor_name, quote)| { payments.get("e.hash()).map(|tx_hash| { ( *xor_name, diff --git a/evmlib/src/external_signer.rs b/evmlib/src/external_signer.rs index 83b43695d0..20c3aa95df 100644 --- a/evmlib/src/external_signer.rs +++ b/evmlib/src/external_signer.rs @@ -12,6 +12,7 @@ use crate::contract::network_token::NetworkToken; use crate::contract::{data_payments, network_token}; use crate::utils::http_provider; use crate::Network; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; #[derive(thiserror::Error, Debug)] @@ -48,6 +49,7 @@ pub fn transfer_tokens_calldata( network_token.transfer_calldata(receiver, amount) } +#[derive(Serialize, Deserialize)] pub struct PayForQuotesCalldataReturnType { pub batched_calldata_map: HashMap>, pub to: Address, @@ -67,7 +69,7 @@ pub fn pay_for_quotes_calldata>( let total_amount = payments.iter().map(|(_, _, amount)| amount).sum(); - let approve_to = *network.data_payments_address(); + let approve_spender = *network.data_payments_address(); let approve_amount = total_amount; let provider = http_provider(network.rpc_url().clone()); @@ -88,7 +90,7 @@ pub fn pay_for_quotes_calldata>( Ok(PayForQuotesCalldataReturnType { batched_calldata_map: calldata_map, to: *data_payments.contract.address(), - approve_spender: approve_to, + approve_spender, approve_amount, }) } diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index 222e4da326..fedb1afe68 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -10,6 +10,7 @@ extern crate tracing; pub use evmlib::common::Address as RewardsAddress; +pub use evmlib::common::Address as EvmAddress; pub use evmlib::common::QuotePayment; pub use evmlib::common::{QuoteHash, TxHash}; #[cfg(feature = "external-signer")] From 76f8b1eb0e49f0923647b03ec0604955208400b1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 22 Oct 2024 18:11:37 +0530 Subject: [PATCH 046/128] fix(network): return kad event dropped if we cannot get the query id --- sn_networking/src/event/kad.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 77bf622fcb..8e903a00ec 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -412,6 +412,7 @@ impl SwarmDriver { let expected_answers = get_quorum_value(&cfg.get_quorum); trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received 
{responded_peers} so far"); + } else { // return error if the entry cannot be found return Err(NetworkError::ReceivedKademliaEventDropped { query_id, From 672ddc8663c7567bd31a65c69b323a92dd956389 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 22 Oct 2024 14:47:30 +0200 Subject: [PATCH 047/128] fix(autonomi): add wait for tx confirmation to the mm js test --- autonomi/examples/metamask/index.js | 47 +++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/autonomi/examples/metamask/index.js b/autonomi/examples/metamask/index.js index 862b433630..633eb36317 100644 --- a/autonomi/examples/metamask/index.js +++ b/autonomi/examples/metamask/index.js @@ -18,8 +18,8 @@ export async function externalSignerPut(peerAddr) { const client = await autonomi.Client.connect([peerAddr]); - // Random bytes to be uploaded - const data = [...Array(16)].map(() => Math.floor(Math.random() * 9)); + // Generate 1MB of random bytes in a Uint8Array + const data = new Uint8Array(1024 * 1024).map(() => Math.floor(Math.random() * 256)); // Get quotes and payment information (this would need actual implementation) const [quotes, quotePayments, free_chunks] = await client.getQuotes(data); @@ -40,23 +40,31 @@ export async function externalSignerPut(peerAddr) { payForQuotesCalldata.approve_amount ); + console.log("Sending approve transaction.."); + // Approve to spend tokens - await sendTransaction({ + let txHash = await sendTransaction({ from: sender, to: approveCalldata[1], data: approveCalldata[0] }); + await waitForTransactionConfirmation(txHash); + let payments = {}; // Execute batched quote payment transactions for (const [calldata, quoteHashes] of payForQuotesCalldata.batched_calldata_map) { - const txHash = await sendTransaction({ + console.log("Sending batched data payment transaction.."); + + let txHash = await sendTransaction({ from: sender, to: payForQuotesCalldata.to, data: calldata }); + await waitForTransactionConfirmation(txHash); + // Record the transaction hashes for each quote quoteHashes.forEach(quoteHash => { payments[quoteHash] = txHash; @@ -74,9 +82,8 @@ export async function externalSignerPut(peerAddr) { // Fetch the data back const fetchedData = await client.dataGet(addr); - const originalData = new Uint8Array(data); - if (fetchedData === originalData) { + if (fetchedData.toString() === data.toString()) { console.log("Fetched data matches the original data!"); } else { throw new Error("Fetched data does not match original data!") @@ -111,4 +118,32 @@ async function sendTransaction({from, to, data}) { console.error("Failed to send transaction:", error); throw error; } +} + +async function waitForTransactionConfirmation(txHash) { + const delay = (ms) => new Promise(resolve => setTimeout(resolve, ms)); + + // Poll for the transaction receipt + while (true) { + // Query the transaction receipt + const receipt = await window.ethereum.request({ + method: 'eth_getTransactionReceipt', + params: [txHash], + }); + + // If the receipt is found, the transaction has been mined + if (receipt !== null) { + // Check if the transaction was successful (status is '0x1') + if (receipt.status === '0x1') { + console.log('Transaction successful!', receipt); + return receipt; // Return the transaction receipt + } else { + console.log('Transaction failed!', receipt); + throw new Error('Transaction failed'); + } + } + + // Wait for 1 second before checking again + await delay(1000); + } } \ No newline at end of file From 71d5cabd0799a0240984a6f812952a3880cd4ad3 Mon Sep 17 00:00:00 
2001 From: qima Date: Fri, 18 Oct 2024 19:13:50 +0800 Subject: [PATCH 048/128] chore(CI): re-enable memcheck test --- .github/workflows/memcheck.yml | 783 ++++++++++++--------------------- .github/workflows/merge.yml | 11 +- 2 files changed, 288 insertions(+), 506 deletions(-) diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index 20116e6149..c2e5406207 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -13,509 +13,282 @@ env: SAFE_DATA_PATH: /home/runner/.local/share/safe CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi NODE_DATA_PATH: /home/runner/.local/share/safe/node - BOOTSTRAP_NODE_DATA_PATH: /home/runner/.local/share/safe/bootstrap_node RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node - FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs - -# jobs: -# memory-check: -# runs-on: ubuntu-latest -# steps: -# - name: Checkout code -# uses: actions/checkout@v4 - -# - name: Check we're on the right commit -# run: git log -1 --oneline - -# - name: Install Rust -# uses: dtolnay/rust-toolchain@stable - -# - uses: Swatinem/rust-cache@v2 -# continue-on-error: true - -# - name: install ripgrep -# shell: bash -# run: sudo apt-get install -y ripgrep - -# - name: Build binaries -# run: cargo build --release --bin safe --bin safenode -# timeout-minutes: 30 - -# - name: Build faucet binary with gifting -# run: cargo build --release --bin faucet --features gifting -# timeout-minutes: 30 - -# - name: Build tests -# run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run -# timeout-minutes: 30 - -# - name: Start a node instance that does not undergo churn -# run: | -# mkdir -p $BOOTSTRAP_NODE_DATA_PATH -# ./target/release/safenode --first \ -# --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Set SAFE_PEERS -# run: | -# safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ -# rg '/ip4.*$' -m1 -o | rg '"' -r '') -# echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV - -# - name: Check SAFE_PEERS was set -# shell: bash -# run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - -# - name: Start a node instance to be restarted -# run: | -# mkdir -p $RESTART_TEST_NODE_DATA_PATH -# ./target/release/safenode \ -# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Start a local network -# env: -# SN_LOG: "all" -# uses: maidsafe/sn-local-testnet-action@main -# with: -# action: start -# build: true -# faucet-path: target/release/faucet -# interval: 2000 -# join: true -# node-path: target/release/safenode -# owner-prefix: node -# platform: ubuntu-latest -# set-safe-peers: false - -# # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet -# - name: Check SAFE_PEERS was not changed -# shell: bash -# run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" - -# - name: Create and fund a wallet to pay for files storage -# run: | -# echo "Obtaining address for use with the faucet..." 
-# ./target/release/safe --log-output-dest=data-dir wallet create --no-password -# address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) -# echo "Sending tokens to the faucet at $address" -# ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt -# cat initial_balance_from_faucet.txt -# cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex -# cat transfer_hex -# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex -# env: -# SN_LOG: "all" -# timeout-minutes: 15 - -# - name: Move faucet log to the working folder -# run: | -# echo "SAFE_DATA_PATH has: " -# ls -l $SAFE_DATA_PATH -# echo "test_faucet foder has: " -# ls -l $SAFE_DATA_PATH/test_faucet -# echo "logs folder has: " -# ls -l $SAFE_DATA_PATH/test_faucet/logs -# mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Download 95mb file to be uploaded with the safe client -# shell: bash -# run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - -# # The resources file we upload may change, and with it mem consumption. -# # Be aware! -# - name: Start a client to upload files -# # -p makes files public -# run: | -# ls -l -# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p -# env: -# SN_LOG: "all" -# timeout-minutes: 25 - -# # this check needs to be after some transfer activity -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $RESTART_TEST_NODE_DATA_PATH -# cat $RESTART_TEST_NODE_DATA_PATH/safenode.log -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $BOOTSTRAP_NODE_DATA_PATH -# cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log - -# - name: Check we're warned about using default genesis -# run: | -# git log -1 --oneline -# ls -la $NODE_DATA_PATH -# rg "USING DEFAULT" "$NODE_DATA_PATH" -u -# shell: bash - -# # Uploading same file using different client shall not incur any payment neither uploads -# # Note rg will throw an error directly in case of failed to find a matching pattern. 
-# - name: Start a different client to upload the same file -# run: | -# pwd -# mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first -# ls -l $SAFE_DATA_PATH -# ls -l $SAFE_DATA_PATH/client_first -# mkdir $SAFE_DATA_PATH/client -# ls -l $SAFE_DATA_PATH -# mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs -# ls -l $CLIENT_DATA_PATH -# cp ./the-test-data.zip ./the-test-data_1.zip -# ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password -# ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt -# cat initial_balance_from_faucet_1.txt -# cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex -# cat transfer_hex -# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex -# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt -# cat second_upload.txt -# rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats -# env: -# SN_LOG: "all" -# timeout-minutes: 25 - -# - name: Stop the restart node -# run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) - -# - name: Start the restart node again -# run: | -# ./target/release/safenode \ -# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & -# sleep 10 -# env: -# SN_LOG: "all" - -# - name: Assert we've reloaded some chunks -# run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH - -# - name: Chunks data integrity during nodes churn -# run: cargo test --release -p sn_node --test data_with_churn -- --nocapture -# env: -# TEST_DURATION_MINS: 5 -# TEST_TOTAL_CHURN_CYCLES: 15 -# SN_LOG: "all" -# timeout-minutes: 30 - -# - name: Check current files -# run: ls -la -# - name: Check safenode file -# run: ls /home/runner/work/safe_network/safe_network/target/release - -# - name: Check there was no restart issues -# run: | -# if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then -# echo "Restart issues detected" -# exit 1 -# else -# echo "No restart issues detected" -# fi - -# - name: Verify the routing tables of the nodes -# run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture -# env: -# SLEEP_BEFORE_VERIFICATION: 300 -# timeout-minutes: 10 - -# - name: Verify restart of nodes using rg -# shell: bash -# timeout-minutes: 1 -# # get the counts, then the specific line, and then the digit count only -# # then check we have an expected level of restarts -# # TODO: make this use an env var, or relate to testnet size -# run: | -# restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Restart $restart_count nodes" -# peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "PeerRemovedFromRoutingTable $peer_removed times" -# if [ $peer_removed -lt $restart_count ]; then -# echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" -# exit 1 -# fi -# node_count=$(ls $NODE_DATA_PATH | wc -l) -# echo "Node dir count is $node_count" -# # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here -# # if [ $restart_count -lt $node_count ]; then -# # echo "Restart count of: $restart_count is less than the node count of: $node_count" -# # exit 1 -# # fi - -# 
- name: Verify data replication using rg -# shell: bash -# timeout-minutes: 1 -# # get the counts, then the specific line, and then the digit count only -# # then check we have an expected level of replication -# # TODO: make this use an env var, or relate to testnet size -# # As the bootstrap_node using separate folder for logging, -# # hence the folder input to rg needs to cover that as well. -# run: | -# sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Sent $sending_list_count replication lists" -# received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Received $received_list_count replication lists" -# fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Carried out $fetching_attempt_count fetching attempts" -# if: always() - -# - name: Start a client to download files -# run: | -# ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick -# ls -l $CLIENT_DATA_PATH/safe_files -# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) -# if [ $downloaded_files -lt 1 ]; then -# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" -# exit 1 -# fi -# env: -# SN_LOG: "all" -# timeout-minutes: 10 - -# # Download the same files again to ensure files won't get corrupted. -# - name: Start a client to download the same files again -# run: | -# ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick -# ls -l $CLIENT_DATA_PATH/safe_files -# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) -# if [ $downloaded_files -lt 1 ]; then -# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" -# exit 1 -# fi -# file_size1=$(stat -c "%s" ./the-test-data_1.zip) -# file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) -# if [ $file_size1 != $file_size2 ]; then -# echo "The downloaded file has a different size $file_size2 to the original $file_size1." -# exit 1 -# fi -# env: -# SN_LOG: "all" -# timeout-minutes: 10 - -# - name: Audit from genesis to collect entire spend DAG and dump to a dot file -# run: | -# ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt -# echo "==============================================================================" -# cat spend_dag_and_statistics.txt -# env: -# SN_LOG: "all" -# timeout-minutes: 5 -# if: always() - -# - name: Ensure discord_ids decrypted -# run: | -# rg 'node_' ./spend_dag_and_statistics.txt -o -# timeout-minutes: 1 -# if: always() - -# - name: Check nodes running -# shell: bash -# timeout-minutes: 1 -# continue-on-error: true -# run: pgrep safenode | wc -l -# if: always() - -# - name: Wait before verifying reward forwarding -# run: sleep 300 - -# - name: Stop the local network and upload logs -# if: always() -# uses: maidsafe/sn-local-testnet-action@main -# with: -# action: stop -# log_file_prefix: safe_test_logs_memcheck -# platform: ubuntu-latest -# build: true - -# - name: Check node memory usage -# shell: bash -# # The resources file and churning chunk_size we upload may change, and with it mem consumption. 
-# # This is set to a value high enough to allow for some variation depending on -# # resources and node location in the network, but hopefully low enough to catch -# # any wild memory issues -# # Any changes to this value should be carefully considered and tested! -# # As we have a bootstrap node acting as an access point for churning nodes and client, -# # The memory usage here will be significantly higher here than in the benchmark test, -# # where we don't have a bootstrap node. -# run: | -# node_peak_mem_limit_mb="300" # mb - -# peak_mem_usage=$( -# rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/{print $2}' | -# sort -n | -# tail -n 1 -# ) -# echo "Node memory usage: $peak_mem_usage MB" - -# if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then -# echo "Node memory usage exceeded threshold: $peak_mem_usage MB" -# exit 1 -# fi -# if: always() - -# - name: Check client memory usage -# shell: bash -# # limits here are lower that benchmark tests as there is less going on. -# run: | -# client_peak_mem_limit_mb="1024" # mb -# client_avg_mem_limit_mb="512" # mb - -# peak_mem_usage=$( -# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/{print $2}' | -# sort -n | -# tail -n 1 -# ) -# echo "Peak memory usage: $peak_mem_usage MB" -# if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then -# echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" -# exit 1 -# fi - -# total_mem=$( -# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | -# awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' -# ) -# num_of_times=$( -# rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "num_of_times: $num_of_times" -# echo "Total memory is: $total_mem" -# average_mem=$(($total_mem/$(($num_of_times)))) -# echo "Average memory is: $average_mem" - -# if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then -# echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" -# exit 1 -# fi - -# - name: Check node swarm_driver handling statistics -# shell: bash -# # With the latest improvements, swarm_driver will be in high chance -# # has no super long handling (longer than 1s). -# # As the `rg` cmd will fail the shell directly if no entry find, -# # hence not covering it. 
-# # Be aware that if do need to looking for handlings longer than second, it shall be: -# # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats -# run: | -# num_of_times=$( -# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "Number of long cmd handling times: $num_of_times" -# total_long_handling_ms=$( -# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | -# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' -# ) -# echo "Total cmd long handling time is: $total_long_handling_ms ms" -# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) -# echo "Average cmd long handling time is: $average_handling_ms ms" -# total_long_handling=$(($total_long_handling_ms)) -# total_num_of_times=$(($num_of_times)) -# num_of_times=$( -# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | -# rg "(\d+) matches" | -# rg "\d+" -o -# ) -# echo "Number of long event handling times: $num_of_times" -# total_long_handling_ms=$( -# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | -# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' -# ) -# echo "Total event long handling time is: $total_long_handling_ms ms" -# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) -# echo "Average event long handling time is: $average_handling_ms ms" -# total_long_handling=$(($total_long_handling_ms+$total_long_handling)) -# total_num_of_times=$(($num_of_times+$total_num_of_times)) -# average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) -# echo "Total swarm_driver long handling times is: $total_num_of_times" -# echo "Total swarm_driver long handling duration is: $total_long_handling ms" -# echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - -# - name: Verify reward forwarding using rg -# shell: bash -# timeout-minutes: 1 -# run: | -# min_reward_forwarding_times="100" -# reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ -# rg "(\d+) matches" | rg "\d+" -o) -# echo "Carried out $reward_forwarding_count reward forwardings" -# if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then -# echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" -# exit 1 -# fi -# if: always() - -# - name: Upload payment wallet initialization log -# uses: actions/upload-artifact@main -# with: -# name: payment_wallet_initialization_log -# path: initial_balance_from_faucet.txt -# continue-on-error: true -# if: always() - -# - name: Move faucet log to the working folder -# run: | -# echo "current folder is:" -# pwd -# echo "SAFE_DATA_PATH has: " -# ls -l $SAFE_DATA_PATH -# echo "test_faucet foder has: " -# ls -l $SAFE_DATA_PATH/test_faucet -# echo "logs folder has: " -# ls -l $SAFE_DATA_PATH/test_faucet/logs -# mv $FAUCET_LOG_PATH/*.log ./faucet_log.log -# env: -# SN_LOG: "all" -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Move bootstrap_node log to the working directory -# run: | -# ls -l $BOOTSTRAP_NODE_DATA_PATH -# mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log -# continue-on-error: true -# if: always() -# timeout-minutes: 1 - -# - name: Upload faucet log -# uses: actions/upload-artifact@main -# with: -# name: 
memory_check_faucet_log -# path: faucet_log.log -# continue-on-error: true -# if: always() - -# - name: Upload bootstrap_node log -# uses: actions/upload-artifact@main -# with: -# name: memory_check_bootstrap_node_log -# path: bootstrap_node.log -# continue-on-error: true -# if: always() - -# - name: Upload spend DAG and statistics -# uses: actions/upload-artifact@main -# with: -# name: memory_check_spend_dag_and_statistics -# path: spend_dag_and_statistics.txt -# continue-on-error: true -# if: always() + +jobs: + memory-check: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Check we're on the right commit + run: git log -1 --oneline + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + + - uses: Swatinem/rust-cache@v2 + continue-on-error: true + + - name: install ripgrep + shell: bash + run: sudo apt-get install -y ripgrep + + - name: Build binaries + run: cargo build --release --features local --bin safenode --bin autonomi + timeout-minutes: 30 + + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@main + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ubuntu-latest + build: true + + - name: Check SAFE_PEERS was set + shell: bash + run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + + - name: Start a node instance to be restarted + run: | + mkdir -p $RESTART_TEST_NODE_DATA_PATH + ./target/release/safenode \ + --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" & + sleep 10 + env: + SN_LOG: "all" + + - name: Download 95mb file to be uploaded with the safe client + shell: bash + run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + + - name: File upload + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data.zip" > ./upload_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 + + - name: showing the upload terminal output + run: cat upload_output + shell: bash + if: always() + + - name: parse address + run: | + UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash + + # Uploading same file using different client shall not incur any payment neither uploads + # Note rg will throw an error directly in case of failed to find a matching pattern. 
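+      # (For context: ripgrep exits with status 1 when nothing matches, and
+      # workflow `run` steps execute under `bash -e`, so a failed match aborts
+      # the step on its own. A hypothetical explicit guard would look like:
+      #   rg "<expected pattern>" ./second_upload || { echo "pattern missing"; exit 1; })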
+ - name: Start a different client to upload the same file + run: | + pwd + ls -l $SAFE_DATA_PATH + mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + ls -l $SAFE_DATA_PATH + ls -l $SAFE_DATA_PATH/client_first + ls -l $SAFE_DATA_PATH/client_first/logs + mkdir $SAFE_DATA_PATH/client + ls -l $SAFE_DATA_PATH + cp ./the-test-data.zip ./the-test-data_1.zip + ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data_1.zip" > ./second_upload 2>&1 + env: + SN_LOG: "all" + timeout-minutes: 25 + + - name: showing the second upload terminal output + run: cat second_upload + shell: bash + if: always() + + - name: Stop the restart node + run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) + + - name: Start the restart node again + run: | + ./target/release/safenode \ + --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --rewards-address "0x03B770D9cD32077cC0bF330c13C114a87643B124" & + sleep 10 + env: + SN_LOG: "all" + + # Records are encrypted, and seeds will change after restart + # Currently, there will be `Existing record found`, but NO `Existing record loaded` + # Due to the failure on decryption (as different seed used) + - name: Assert we've reloaded some chunks + run: rg "Existing record found" $RESTART_TEST_NODE_DATA_PATH + + - name: Verify data replication using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of replication + run: | + sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Sent $sending_list_count replication lists" + received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Received $received_list_count replication lists" + fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Carried out $fetching_attempt_count fetching attempts" + if: always() + + - name: File Download + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources + env: + SN_LOG: "v" + timeout-minutes: 2 + + - name: Check nodes running + shell: bash + timeout-minutes: 1 + continue-on-error: true + run: pgrep safenode | wc -l + if: always() + + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_memcheck + platform: ubuntu-latest + build: true + + - name: Check node memory usage + shell: bash + # The resources file and churning chunk_size we upload may change, and with it mem consumption. + # This is set to a value high enough to allow for some variation depending on + # resources and node location in the network, but hopefully low enough to catch + # any wild memory issues + # Any changes to this value should be carefully considered and tested! + # As we have a bootstrap node acting as an access point for churning nodes and client, + # The memory usage here will be significantly higher here than in the benchmark test, + # where we don't have a bootstrap node. 
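+        # The pipeline below extracts every `"memory_used_mb":<n>` metric from
+        # the node logs, keeps only the number with awk, sorts numerically and
+        # takes the last entry, i.e. the peak value observed across all nodes.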
+        run: |
+          node_peak_mem_limit_mb="300" # mb
+
+          peak_mem_usage=$(
+            rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
+            awk -F':' '/"memory_used_mb":/{print $2}' |
+            sort -n |
+            tail -n 1
+          )
+          echo "Node memory usage: $peak_mem_usage MB"
+
+          if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
+            echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
+            exit 1
+          fi
+        if: always()
+
+      - name: Check client memory usage
+        shell: bash
+        # limits here are lower than in the benchmark tests as there is less going on.
+        run: |
+          client_peak_mem_limit_mb="1024" # mb
+          client_avg_mem_limit_mb="512" # mb
+
+          peak_mem_usage=$(
+            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+            awk -F':' '/"memory_used_mb":/{print $2}' |
+            sort -n |
+            tail -n 1
+          )
+          echo "Peak memory usage: $peak_mem_usage MB"
+          if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
+            echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
+            exit 1
+          fi
+
+          total_mem=$(
+            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename |
+            awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
+          )
+          num_of_times=$(
+            rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats |
+            rg "(\d+) matches" |
+            rg "\d+" -o
+          )
+          echo "num_of_times: $num_of_times"
+          echo "Total memory is: $total_mem"
+          average_mem=$(($total_mem/$(($num_of_times))))
+          echo "Average memory is: $average_mem"
+
+          if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
+            echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
+            exit 1
+          fi
+
+      # Logging of handling time is at Trace level,
+      # while the local_network startup tool sets the logging level to Debug.
+      #
+      # - name: Check node swarm_driver handling statistics
+      #   shell: bash
+      #   # With the latest improvements, swarm_driver is unlikely to
+      #   # have any super long handling (longer than 1s).
+      #   # As the `rg` cmd fails the shell directly if no entry is found,
+      #   # that case is not covered here.
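+      #   # (For reference, the awk below splits fields on ' ' or 'ms:', so for
+      #   # a match like "SwarmCmd handled in 12.3ms:" the duration is field 4.)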
+ # # Be aware that if do need to looking for handlings longer than second, it shall be: + # # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats + # run: | + # num_of_times=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long cmd handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total cmd long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average cmd long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms)) + # total_num_of_times=$(($num_of_times)) + # num_of_times=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long event handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total event long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average event long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms+$total_long_handling)) + # total_num_of_times=$(($num_of_times+$total_num_of_times)) + # average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) + # echo "Total swarm_driver long handling times is: $total_num_of_times" + # echo "Total swarm_driver long handling duration is: $total_long_handling ms" + # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + + - name: Move restart_node log to the working directory + run: | + ls -l $RESTART_TEST_NODE_DATA_PATH + mv $RESTART_TEST_NODE_DATA_PATH/safenode.log ./restart_node.log + continue-on-error: true + if: always() + timeout-minutes: 1 + + - name: Upload restart_node log + uses: actions/upload-artifact@main + with: + name: memory_check_restart_node_log + path: restart_node.log + continue-on-error: true + if: always() diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 7d29af7ced..da6914f65b 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -1122,11 +1122,20 @@ jobs: shell: bash - name: File Download - run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources > ./download_output 2>&1 env: SN_LOG: "v" timeout-minutes: 2 + - name: showing the download terminal output + run: | + cat download_output + ls -l + cd downloaded_resources + ls -l + shell: bash + if: always() + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@main From 7a9821734da032e112eb102fa4422c1468ced20d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 13:54:46 +0000 Subject: [PATCH 049/128] chore(deps): bump actions/cache from 4.0.2 to 4.1.2 Bumps 
[actions/cache](https://github.com/actions/cache) from 4.0.2 to 4.1.2. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v4.0.2...v4.1.2) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/node_man_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml index 55cd701cbf..54d6d3d625 100644 --- a/.github/workflows/node_man_tests.yml +++ b/.github/workflows/node_man_tests.yml @@ -25,7 +25,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: cargo cache registry, index and build - uses: actions/cache@v4.0.2 + uses: actions/cache@v4.1.2 with: path: | ~/.cargo/registry From 8eeb1a69af10378ce92c83c258e43919451054c2 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 22 Oct 2024 15:59:41 +0200 Subject: [PATCH 050/128] feat(autonomi): generate metadata from actual file --- autonomi/src/client/fs.rs | 46 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index c4ad658455..ba62d355fe 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -11,6 +11,7 @@ use crate::client::data::CostError; use crate::client::Client; use bytes::Bytes; use sn_evm::EvmWallet; +use sn_networking::target_arch::{Duration, SystemTime}; use std::path::PathBuf; use super::archive::{Archive, ArchiveAddr}; @@ -110,7 +111,9 @@ impl Client { println!("Uploading file: {path:?}"); let file = self.file_upload(path.clone(), wallet).await?; - archive.add_file(path, file, Metadata::new()); + let metadata = metadata_from_entry(&entry); + + archive.add_file(path, file, metadata); } let archive_serialized = archive.into_bytes()?; @@ -173,3 +176,44 @@ impl Client { Ok(total_cost.into()) } } + +// Get metadata from directory entry. Defaults to `0` for creation and modification times if +// any error is encountered. Logs errors upon error. 
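+//
+// A minimal usage sketch (illustrative only, not compiled here):
+//
+//   for entry in walkdir::WalkDir::new("some/dir").into_iter().flatten() {
+//       let metadata = metadata_from_entry(&entry);
+//       // `metadata.created` / `metadata.modified` are seconds since the UNIX epoch
+//   }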
+fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata { + let fs_metadata = match entry.metadata() { + Ok(metadata) => metadata, + Err(err) => { + tracing::warn!( + "Failed to get metadata for `{}`: {err}", + entry.path().display() + ); + return Metadata { + created: 0, + modified: 0, + }; + } + }; + + let unix_time = |property: &'static str, time: std::io::Result| { + time.inspect_err(|err| { + tracing::warn!( + "Failed to get '{property}' metadata for `{}`: {err}", + entry.path().display() + ); + }) + .unwrap_or(SystemTime::UNIX_EPOCH) + .duration_since(SystemTime::UNIX_EPOCH) + .inspect_err(|err| { + tracing::warn!( + "'{property}' metadata of `{}` is before UNIX epoch: {err}", + entry.path().display() + ); + }) + .unwrap_or(Duration::from_secs(0)) + .as_secs() + }; + let created = unix_time("created", fs_metadata.created()); + let modified = unix_time("modified", fs_metadata.modified()); + + Metadata { created, modified } +} From 435487fd19b3a35502255674cc6ba775256d2607 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 23 Oct 2024 11:22:07 +0200 Subject: [PATCH 051/128] chore: upgrade alloy to version 0.5.3 --- Cargo.lock | 100 ++++++++++++++++++++++++++------------------ autonomi/Cargo.toml | 2 +- evmlib/Cargo.toml | 2 +- 3 files changed, 61 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e76033a306..5e7d997903 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" +checksum = "d8cbebb817e6ada1abb27e642592a39eebc963eb0b9e78f66c467549f3903770" dependencies = [ "alloy-consensus", "alloy-contract", @@ -151,9 +151,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +checksum = "cdf02dfacfc815214f9b54ff50d54900ba527a68fd73e2c5637ced3460005045" dependencies = [ "alloy-eips", "alloy-primitives", @@ -167,9 +167,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" +checksum = "d45354c6946d064827d3b85041876aad9490b634f1761139934f8b1f65686b09" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -228,20 +228,21 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "c15873ee28dfe5a1aeddd762483bc7f378b465ec49bdce8165c4c46b4f55cb0a" dependencies = [ "alloy-primitives", "alloy-rlp", + "derive_more", "serde", ] [[package]] name = "alloy-eips" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +checksum = "769da342b6bcd945013925ef4c40763cc82f11e002c60702dba8b444bb60e5a7" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -257,9 +258,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = "c698ce0ada980b17f0323e1a28c7da8a2e9abc6dff5be9ee33d1525b28ac46b6" dependencies = [ "alloy-primitives", "alloy-serde", @@ -280,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "c1050e1d65524c030b17442b6546b564da51fdab7f71bd534b001ba65f2ebb16" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -294,9 +295,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +checksum = "da34a18446a27734473af3d77eb21c5ebbdf97ea8eb65c39c0b50916bc659023" dependencies = [ "alloy-consensus", "alloy-eips", @@ -315,9 +316,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695" dependencies = [ "alloy-consensus", "alloy-eips", @@ -328,9 +329,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "439fc6a933b9f8e8b272a8cac35dbeabaf2b2eaf9590482bebedb5782153118e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -372,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +checksum = "c45dbc0e3630becef9e988b69d43339f68d67e32a854e3c855bc28bd5031895b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -387,6 +388,7 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", + "alloy-signer", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -397,14 +399,17 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot", "pin-project", "reqwest 0.12.7", + "schnellru", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] @@ -431,9 +436,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "917e5504e4f8f7e39bdc322ff81589ed54c1e462240adaeb58162c2d986a5a2b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -449,13 +454,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +checksum = "07c7eb2dc6db1dd41e5e7bd2b98a38813854efc30e034afd90d1e420e7f3de2b" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -466,9 +472,9 @@ dependencies = [ [[package]] name = 
"alloy-rpc-types-anvil" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +checksum = "2640928d9b1d43bb1cec7a0d615e10c2b407c5bd8ff1fcbe49e6318a2b62d731" dependencies = [ "alloy-primitives", "alloy-serde", @@ -477,9 +483,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +checksum = "e855b0daccf2320ba415753c3fed422abe9d3ad5d77b2d6cafcc9bcf32fe387f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -496,9 +502,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +checksum = "35c2661ca6785add8fc37aff8005439c806ffad58254c19939c6f59ac0d6596e" dependencies = [ "alloy-primitives", "serde", @@ -507,9 +513,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +checksum = "67eca011160d18a7dc6d8cdc1e8dc13e2e86c908f8e41b02aa76e429d6fe7085" dependencies = [ "alloy-primitives", "async-trait", @@ -521,9 +527,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" +checksum = "1c54b195a6ee5a83f32e7c697b4e6b565966737ed5a2ef9176bbbb39f720d023" dependencies = [ "alloy-consensus", "alloy-network", @@ -610,9 +616,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "3e4a136e733f55fef0870b81e1f8f1db28e78973d1b1ae5a5df642ba39538a07" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -626,13 +632,14 @@ dependencies = [ "tracing", "url", "wasm-bindgen-futures", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "1a6b358a89b6d107b92d09b61a61fbc04243942182709752c796f4b29402cead" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -7567,6 +7574,17 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schnellru" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" +dependencies = [ + "ahash", + "cfg-if", + "hashbrown 0.13.2", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -9898,7 +9916,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 2ec971c006..462533f49b 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -54,7 +54,7 @@ wasm-bindgen-futures = "0.4.43" 
serde-wasm-bindgen = "0.6.5" [dev-dependencies] -alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } eyre = "0.6.5" sha2 = "0.10.6" sn_logging = { path = "../sn_logging", version = "0.2.33" } diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index c3cfbdf432..7db6d22301 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -14,7 +14,7 @@ local = [] external-signer = [] [dependencies] -alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } dirs-next = "~2.0.0" serde = "1.0" serde_with = { version = "3.11.0", features = ["macros"] } From 49115fd211e3417f4d613c18a73cda898adb682d Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 23 Oct 2024 17:45:03 +0800 Subject: [PATCH 052/128] Revert "Revert "Merge pull request #2224 from joshuef/RangeBasedGets"" This reverts commit 09c36c85bd0bea1931824469e8b934608441a28b. --- .github/workflows/merge.yml | 392 ++++++++++++-------- sn_networking/src/bootstrap.rs | 120 +----- sn_networking/src/cmd.rs | 163 ++++---- sn_networking/src/driver.rs | 157 +++++--- sn_networking/src/error.rs | 14 +- sn_networking/src/event/kad.rs | 331 +++++++++++------ sn_networking/src/event/request_response.rs | 147 ++++---- sn_networking/src/event/swarm.rs | 54 ++- sn_networking/src/lib.rs | 244 +++++++++++- sn_networking/src/network_discovery.rs | 37 +- sn_networking/src/record_store.rs | 23 +- sn_networking/src/record_store_api.rs | 14 +- sn_networking/src/replication_fetcher.rs | 64 +++- sn_networking/src/transfers.rs | 34 +- sn_node/src/put_validation.rs | 13 +- sn_node/src/replication.rs | 120 ++---- sn_node/tests/double_spend.rs | 196 ++++++---- sn_node/tests/storage_payments.rs | 257 ++++++------- sn_node/tests/verify_data_location.rs | 22 +- sn_node/tests/verify_routing_table.rs | 2 +- sn_protocol/src/error.rs | 3 + sn_protocol/src/storage.rs | 5 +- sn_protocol/src/storage/header.rs | 27 ++ sn_transfers/src/wallet/error.rs | 10 + 24 files changed, 1438 insertions(+), 1011 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 98ee999b06..1b5395b028 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -531,15 +531,19 @@ jobs: # platform: ${{ matrix.os }} # build: true - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + # # incase the faucet is not ready yet + # - name: 30s sleep for faucet completion + # run: sleep 30 + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi # - name: execute token_distribution tests # run: cargo test --release --features=local,distribution token_distribution -- --nocapture 
--test-threads=1 @@ -631,7 +635,37 @@ jobs: log_file_prefix: safe_test_logs_churn platform: ${{ matrix.os }} - - name: Verify restart of nodes using rg + - name: Get total node count + shell: bash + timeout-minutes: 1 + run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" + + - name: Get restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restarted $restart_count nodes" + + - name: Get peers removed from nodes using rg + shell: bash + timeout-minutes: 1 + run: | + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; } + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" + exit 1 + fi + echo "PeerRemovedFromRoutingTable $peer_removed times" + + - name: Verify peers removed exceed restarted node counts shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -648,8 +682,6 @@ jobs: echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" exit 1 fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then @@ -768,7 +800,7 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: stop - log_file_prefix: safe_test_logs_data_location + log_file_prefix: safe_test_logs_data_location_routing_table platform: ${{ matrix.os }} - name: Verify restart of nodes using rg @@ -860,15 +892,15 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet first time - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - # echo "----------" - # cat first.txt - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Move faucet log to the working folder # run: | @@ -894,44 +926,64 @@ jobs: # continue-on-error: true # if: always() - # - name: Create and fund a wallet second time - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - # echo "----------" - # cat second.txt - # if grep "genesis is already spent" second.txt; then - # echo "Duplicated 
faucet rejected" - # else - # echo "Duplicated faucet not rejected!" - # exit 1 - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Create and fund a wallet with different keypair - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - # echo "Faucet with different genesis key not rejected!" - # exit 1 - # else - # echo "Faucet with different genesis key rejected" - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Cleanup prior faucet and cashnotes + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create a new wallet + # run: ~/safe --log-output-dest=data-dir wallet create --no-password + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Attempt second faucet genesis disbursement + # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 || true + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: cat second.txt + # run: cat second.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Verify a second disbursement is rejected + # run: | + # if grep "Faucet disbursement has already occured" second.txt; then + # echo "Duplicated faucet rejected" + # else + # echo "Duplicated faucet not rejected!" + # exit 1 + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create and fund a wallet with different keypair + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # echo "Faucet with different genesis key not rejected!" 
+ # exit 1 + # else + # echo "Faucet with different genesis key rejected" + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Build faucet binary again without the gifting feature # run: cargo build --release --bin faucet @@ -1059,14 +1111,14 @@ jobs: # echo "PWD subdirs:" # du -sh */ - # - name: Create and fund a wallet to pay for files storage - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick @@ -1116,6 +1168,8 @@ jobs: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" # name: Replication bench with heavy upload # runs-on: ubuntu-latest + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client # steps: # - uses: actions/checkout@v4 @@ -1192,14 +1246,28 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet to pay for files storage - # run: | - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Sleep 15s + # shell: bash + # run: sleep 15 + + # - name: Check faucet has been funded + # shell: bash + # run: | + # cash_note_count=$(ls -l /home/runner/.local/share/safe/test_faucet/wallet/cash_notes/ | wc -l) + # echo $cash_note_count + # if [ "$cash_note_count" -eq 0 ]; then + # echo "Error: Expected at least 1 cash note, but found $cash_note_count" + # exit 1 + # fi + + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload first file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick @@ -1207,29 +1275,32 @@ jobs: # SN_LOG: "all" # timeout-minutes: 5 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # 
if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Check current directories + # run: | + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # timeout-minutes: 1 + + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 @@ -1241,52 +1312,49 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) - # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then - # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) - # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) + # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then + # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 # timeout-minutes: 6 - # # Start a different client to avoid local wallet slow down with more payments handled. 
- # - name: Start a different client - # run: | - # pwd - # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - # ls -l $SAFE_DATA_PATH - # ls -l $SAFE_DATA_PATH/client_first - # mkdir $SAFE_DATA_PATH/client - # ls -l $SAFE_DATA_PATH - # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - # ls -l $CLIENT_DATA_PATH - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # SAFE_DATA_PATH: /home/runner/.local/share/safe - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 25 + # # Start a different client to avoid local wallet slow down with more payments handled. + # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # timeout-minutes: 25 # - name: Use second client to upload third file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick @@ -1294,29 +1362,27 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + 
# exit 1 + # fi + # timeout-minutes: 10 # - name: Stop the local network and upload logs # if: always() diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index f8b7cf1e59..ec6c019a88 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -7,45 +7,19 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{driver::PendingGetClosestType, SwarmDriver}; -use rand::{rngs::OsRng, Rng}; use tokio::time::Duration; -use crate::target_arch::{interval, Instant, Interval}; +use crate::target_arch::Instant; /// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the /// routing table. -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10); - -/// Every BOOTSTRAP_CONNECTED_PEERS_STEP connected peer, we step up the BOOTSTRAP_INTERVAL to slow down bootstrapping -/// process -const BOOTSTRAP_CONNECTED_PEERS_STEP: u32 = 5; - -/// If the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT, then we should slowdown the bootstrapping -/// process. This is to make sure we don't flood the network with `FindNode` msgs. -const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); - -/// A minimum interval to prevent bootstrap got triggered too often -const LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(30); - -/// The bootstrap interval to use if we haven't added any new peers in a while. -const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; +pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(15); impl SwarmDriver { /// This functions triggers network discovery based on when the last peer was added to the RT and the number of - /// peers in RT. The function also returns a new bootstrap interval that is proportional to the number of - /// peers in RT, so more peers in RT, the longer the interval. - pub(crate) async fn run_bootstrap_continuously( - &mut self, - current_bootstrap_interval: Duration, - ) -> Option { - let (should_bootstrap, new_interval) = self - .bootstrap - .should_we_bootstrap(self.peers_in_rt as u32, current_bootstrap_interval) - .await; - if should_bootstrap { - self.trigger_network_discovery(); - } - new_interval + /// peers in RT. + pub(crate) fn run_bootstrap_continuously(&mut self) { + self.trigger_network_discovery(); } pub(crate) fn trigger_network_discovery(&mut self) { @@ -61,27 +35,27 @@ impl SwarmDriver { .get_closest_peers(addr.as_bytes()); let _ = self.pending_get_closest_peers.insert( query_id, - (PendingGetClosestType::NetworkDiscovery, Default::default()), + ( + addr, + PendingGetClosestType::NetworkDiscovery, + Default::default(), + ), ); } self.bootstrap.initiated(); - debug!("Trigger network discovery took {:?}", now.elapsed()); + info!("Trigger network discovery took {:?}", now.elapsed()); } } /// Tracks and helps with the continuous kad::bootstrapping process pub(crate) struct ContinuousBootstrap { - initial_bootstrap_done: bool, - last_peer_added_instant: Instant, last_bootstrap_triggered: Option, } impl ContinuousBootstrap { pub(crate) fn new() -> Self { Self { - initial_bootstrap_done: false, - last_peer_added_instant: Instant::now(), last_bootstrap_triggered: None, } } @@ -90,76 +64,4 @@ impl ContinuousBootstrap { pub(crate) fn initiated(&mut self) { self.last_bootstrap_triggered = Some(Instant::now()); } - - /// Notify about a newly added peer to the RT. This will help with slowing down the bootstrap process. 
- /// Returns `true` if we have to perform the initial bootstrapping. - pub(crate) fn notify_new_peer(&mut self) -> bool { - self.last_peer_added_instant = Instant::now(); - // true to kick off the initial bootstrapping. `run_bootstrap_continuously` might kick of so soon that we might - // not have a single peer in the RT and we'd not perform any bootstrapping for a while. - if !self.initial_bootstrap_done { - self.initial_bootstrap_done = true; - true - } else { - false - } - } - - /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. - /// Also optionally returns the new interval to re-bootstrap. - pub(crate) async fn should_we_bootstrap( - &self, - peers_in_rt: u32, - current_interval: Duration, - ) -> (bool, Option) { - let is_ongoing = if let Some(last_bootstrap_triggered) = self.last_bootstrap_triggered { - last_bootstrap_triggered.elapsed() < LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT - } else { - false - }; - let should_bootstrap = !is_ongoing && peers_in_rt >= 1; - - // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer to our RT, then, slowdown - // the bootstrapping process. - // Don't slow down if we haven't even added one peer to our RT. - if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { - // To avoid a heart beat like cpu usage due to the 1K candidates generation, - // randomize the interval within certain range - let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( - NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, - ); - let no_peer_added_slowdown_interval_duration = - Duration::from_secs(no_peer_added_slowdown_interval); - info!( - "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" - ); - - // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. - #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] - let mut new_interval = interval(no_peer_added_slowdown_interval_duration); - #[cfg(not(target_arch = "wasm32"))] - new_interval.tick().await; - - return (should_bootstrap, Some(new_interval)); - } - - // increment bootstrap_interval in steps of BOOTSTRAP_INTERVAL every BOOTSTRAP_CONNECTED_PEERS_STEP - let step = peers_in_rt / BOOTSTRAP_CONNECTED_PEERS_STEP; - let step = std::cmp::max(1, step); - let new_interval = BOOTSTRAP_INTERVAL * step; - let new_interval = if new_interval > current_interval { - info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); - - // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. - #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] - let mut interval = interval(new_interval); - #[cfg(not(target_arch = "wasm32"))] - interval.tick().await; - - Some(interval) - } else { - None - }; - (should_bootstrap, new_interval) - } } diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index b0eda19190..5ec9ebd827 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -7,24 +7,25 @@ // permissions and limitations relating to use of the SAFE Network Software. 
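The net effect of the bootstrap.rs rewrite above is that all adaptive pacing is gone: the interval no longer grows with routing-table size, and the peer-added/slow-down constants are removed in favour of a fixed 15s `BOOTSTRAP_INTERVAL`. A minimal sketch of the resulting driver loop (illustrative only; `BOOTSTRAP_INTERVAL` and `trigger_network_discovery` are the names used by this patch, the surrounding function is assumed):

```rust
use tokio::time::{interval, Duration};

/// Fixed cadence after this change (previously 10s plus adaptive back-off).
const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(15);

/// Illustrative stand-in for the SwarmDriver select! arm: every tick we
/// unconditionally kick network discovery; no new interval is computed.
async fn bootstrap_loop(mut trigger_network_discovery: impl FnMut()) {
    let mut bootstrap_interval = interval(BOOTSTRAP_INTERVAL);
    loop {
        bootstrap_interval.tick().await;
        trigger_network_discovery();
    }
}
```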
use crate::{
+ close_group_majority,
 driver::{PendingGetClosestType, SwarmDriver},
 error::{NetworkError, Result},
 event::TerminateNodeReason,
 log_markers::Marker,
- multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE,
- REPLICATION_PEERS_COUNT,
+ multiaddr_pop_p2p, sort_peers_by_address_and_limit, GetRecordCfg, GetRecordError, MsgResponder,
+ NetworkEvent, CLOSE_GROUP_SIZE,
 };
 use libp2p::{
 kad::{
 store::{Error as StoreError, RecordStore},
- Quorum, Record, RecordKey,
+ KBucketDistance, Quorum, Record, RecordKey,
 },
 Multiaddr, PeerId,
 };
 use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics};
 use sn_protocol::{
 messages::{Cmd, Request, Response},
- storage::{RecordHeader, RecordKind, RecordType},
+ storage::{get_type_from_record, RecordType},
 NetworkAddress, PrettyPrintRecordKey,
 };
 use std::{
@@ -33,7 +34,6 @@ use std::{
 time::Duration,
 };
 use tokio::sync::oneshot;
-use xor_name::XorName;

 use crate::target_arch::Instant;

@@ -56,6 +56,15 @@ pub enum NodeIssue {

 /// Commands to send to the Swarm
 pub enum LocalSwarmCmd {
+ // Returns all the peers from all the k-buckets from the local Routing Table.
+ // Excludes our own PeerId.
+ GetAllLocalPeersExcludingSelf {
+ sender: oneshot::Sender<Vec<PeerId>>,
+ },
+ /// Return the current GetRange as determined by the SwarmDriver
+ GetCurrentRange {
+ sender: oneshot::Sender<KBucketDistance>,
+ },
 /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
 /// bucket.
 GetKBuckets {
@@ -67,8 +76,8 @@ pub enum LocalSwarmCmd {
 sender: oneshot::Sender<Vec<PeerId>>,
 },
 // Get closest peers from the local RoutingTable
- GetCloseGroupLocalPeers {
- key: NetworkAddress,
+ GetCloseRangeLocalPeers {
+ address: NetworkAddress,
 sender: oneshot::Sender<Vec<PeerId>>,
 },
 GetSwarmLocalState(oneshot::Sender<SwarmLocalState>),
@@ -213,15 +222,11 @@ impl Debug for LocalSwarmCmd {
 PrettyPrintRecordKey::from(key)
 )
 }
-
 LocalSwarmCmd::GetClosestKLocalPeers { .. } => {
 write!(f, "LocalSwarmCmd::GetClosestKLocalPeers")
 }
- LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => {
- write!(
- f,
- "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}"
- )
+ LocalSwarmCmd::GetCloseRangeLocalPeers { address: key, .. } => {
+ write!(f, "LocalSwarmCmd::GetCloseRangeLocalPeers {{ key: {key:?} }}")
 }
 LocalSwarmCmd::GetLocalStoreCost { .. } => {
 write!(f, "LocalSwarmCmd::GetLocalStoreCost")
@@ -242,6 +247,12 @@ impl Debug for LocalSwarmCmd {
 LocalSwarmCmd::GetKBuckets { .. } => {
 write!(f, "LocalSwarmCmd::GetKBuckets")
 }
+ LocalSwarmCmd::GetCurrentRange { .. } => {
+ write!(f, "LocalSwarmCmd::GetCurrentRange")
+ }
+ LocalSwarmCmd::GetAllLocalPeersExcludingSelf { .. } => {
+ write!(f, "LocalSwarmCmd::GetAllLocalPeersExcludingSelf")
+ }
 LocalSwarmCmd::GetSwarmLocalState { ..
} => { write!(f, "LocalSwarmCmd::GetSwarmLocalState") } @@ -472,6 +483,7 @@ impl SwarmDriver { let _ = self.pending_get_closest_peers.insert( query_id, ( + key, PendingGetClosestType::FunctionCall(sender), Default::default(), ), @@ -541,6 +553,7 @@ impl SwarmDriver { Ok(()) } + pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> { let start = Instant::now(); let mut cmd_string; @@ -624,28 +637,7 @@ impl SwarmDriver { let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); - let record_type = match RecordHeader::from_record(&record) { - Ok(record_header) => { - match record_header.kind { - RecordKind::Chunk => RecordType::Chunk, - RecordKind::Scratchpad => RecordType::Scratchpad, - RecordKind::Spend | RecordKind::Register => { - let content_hash = XorName::from_content(&record.value); - RecordType::NonChunk(content_hash) - } - RecordKind::ChunkWithPayment - | RecordKind::RegisterWithPayment - | RecordKind::ScratchpadWithPayment => { - error!("Record {record_key:?} with payment shall not be stored locally."); - return Err(NetworkError::InCorrectRecordHeader); - } - } - } - Err(err) => { - error!("For record {record_key:?}, failed to parse record_header {err:?}"); - return Err(NetworkError::InCorrectRecordHeader); - } - }; + let record_type = get_type_from_record(&record)?; let result = self .swarm @@ -694,16 +686,8 @@ impl SwarmDriver { // The record_store will prune far records and setup a `distance range`, // once reached the `max_records` cap. - if let Some(distance) = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .get_farthest_replication_distance_bucket() - { - self.replication_fetcher - .set_replication_distance_range(distance); - } + self.replication_fetcher + .set_replication_distance_range(self.get_request_range()); if let Err(err) = result { error!("Can't store verified record {record_key:?} locally: {err:?}"); @@ -760,6 +744,10 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } + LocalSwarmCmd::GetCurrentRange { sender } => { + cmd_string = "GetCurrentRange"; + let _ = sender.send(self.get_request_range()); + } LocalSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); @@ -778,9 +766,13 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => { - cmd_string = "GetCloseGroupLocalPeers"; - let key = key.as_kbucket_key(); + LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender } => { + cmd_string = "GetAllLocalPeersExcludingSelf"; + let _ = sender.send(self.get_all_local_peers_excluding_self()); + } + LocalSwarmCmd::GetCloseRangeLocalPeers { address, sender } => { + cmd_string = "GetCloseRangeLocalPeers"; + let key = address.as_kbucket_key(); // calls `kbuckets.closest_keys(key)` internally, which orders the peers by // increasing distance // Note it will return all peers, heance a chop down is required. 
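From client code, the new `GetCurrentRange` command is consumed through the usual oneshot pattern; `Network::get_range`, added in the sn_networking/src/lib.rs hunk later in this patch, wraps it. An illustrative usage sketch (`network` is assumed to be a connected `Network` handle):

```rust
// Illustrative only: fetch the driver's current get-range and log its
// ilog2 bucket index, which is how the range is compared throughout
// this patch.
async fn log_current_range(network: &Network) -> Result<()> {
    let range: KBucketDistance = network.get_range().await?;
    info!("current get-range bucket: {:?}", range.ilog2());
    Ok(())
}
```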
@@ -790,7 +782,6 @@ impl SwarmDriver { .kademlia .get_closest_local_peers(&key) .map(|peer| peer.into_preimage()) - .take(CLOSE_GROUP_SIZE) .collect(); let _ = sender.send(closest_peers); @@ -981,24 +972,72 @@ impl SwarmDriver { let _ = self.quotes_history.insert(peer_id, quote); } - fn try_interval_replication(&mut self) -> Result<()> { - // get closest peers from buckets, sorted by increasing distance to us - let our_peer_id = self.self_peer_id.into(); - let closest_k_peers = self + /// From all local peers, returns any within (and just exceeding) current get_range for a given key + pub(crate) fn get_filtered_peers_exceeding_range( + &mut self, + target_address: &NetworkAddress, + ) -> Vec { + let acceptable_distance_range = self.get_request_range(); + let target_key = target_address.as_kbucket_key(); + + let peers = self .swarm .behaviour_mut() .kademlia - .get_closest_local_peers(&our_peer_id) - // Map KBucketKey to PeerId. - .map(|key| key.into_preimage()); - - // Only grab the closest nodes within the REPLICATE_RANGE - let mut replicate_targets = closest_k_peers - .into_iter() - // add some leeway to allow for divergent knowledge - .take(REPLICATION_PEERS_COUNT) + .get_closest_local_peers(&target_key) + .filter_map(|key| { + // here we compare _bucket_, not the exact distance. + // We want to include peers that are just outside the range + // Such that we can and will exceed the range in a search eventually + if acceptable_distance_range.ilog2() < target_key.distance(&key).ilog2() { + return None; + } + + // Map KBucketKey to PeerId. + let peer_id = key.into_preimage(); + Some(peer_id) + }) .collect::>(); + peers + } + + /// From all local peers, returns any within current get_range for a given key + /// Excludes self + pub(crate) fn get_filtered_peers_exceeding_range_or_closest_nodes( + &mut self, + target_address: &NetworkAddress, + ) -> Vec { + let filtered_peers = self.get_filtered_peers_exceeding_range(target_address); + let closest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority(); + if filtered_peers.len() >= closest_node_buffer_zone { + filtered_peers + } else { + warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {closest_node_buffer_zone:?} closest nodes"); + let all_peers = self.get_all_local_peers_excluding_self(); + match sort_peers_by_address_and_limit( + &all_peers, + target_address, + closest_node_buffer_zone, + ) { + Ok(peers) => peers.iter().map(|p| **p).collect(), + Err(err) => { + error!("sorting peers close to {target_address:?} failed, sort error: {err:?}"); + warn!( + "Using all peers within range even though it's less than CLOSE_GROUP_SIZE." 
+ ); + filtered_peers + } + } + } + } + + fn try_interval_replication(&mut self) -> Result<()> { + let our_address = NetworkAddress::from_peer(self.self_peer_id); + + let mut replicate_targets = + self.get_filtered_peers_exceeding_range_or_closest_nodes(&our_address); + let now = Instant::now(); self.replication_targets .retain(|_peer_id, timestamp| *timestamp > now); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index ec716cb4df..f432d231fc 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -20,6 +20,7 @@ use crate::{ record_store_api::UnifiedRecordStore, relay_manager::RelayManager, replication_fetcher::ReplicationFetcher, + sort_peers_by_distance_to, target_arch::{interval, spawn, Instant}, GetRecordError, Network, CLOSE_GROUP_SIZE, }; @@ -32,7 +33,6 @@ use futures::future::Either; use futures::StreamExt; #[cfg(feature = "local")] use libp2p::mdns; -use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; use libp2p::{ identity::Keypair, @@ -45,6 +45,7 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use libp2p::{kad::KBucketDistance, Transport as _}; #[cfg(feature = "open-metrics")] use prometheus_client::metrics::info::Info; use sn_evm::PaymentQuote; @@ -59,10 +60,9 @@ use sn_protocol::{ }; use sn_registers::SignedRegister; use std::{ - collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, fmt::Debug, net::SocketAddr, - num::NonZeroUsize, path::PathBuf, }; use tokio::sync::{mpsc, oneshot}; @@ -77,6 +77,9 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 /// Interval over which we query relay manager to check if we can make any more reservations. pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); +// Number of range distances to keep in the circular buffer +pub const GET_RANGE_STORAGE_LIMIT: usize = 100; + const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); /// The ways in which the Get Closest queries are used. @@ -87,7 +90,9 @@ pub(crate) enum PendingGetClosestType { /// These are queries made by a function at the upper layers and contains a channel to send the result back. FunctionCall(oneshot::Sender>), } -type PendingGetClosest = HashMap)>; + +/// Maps a query to the address, the type of query and the peers that are being queried. +type PendingGetClosest = HashMap)>; /// Using XorName to differentiate different record content under the same key. type GetRecordResultMap = HashMap)>; @@ -123,13 +128,6 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10); -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { - Some(v) => v, - None => panic!("CLOSE_GROUP_SIZE should not be zero"), -}; - /// The various settings to apply to when fetching a record from network #[derive(Clone)] pub struct GetRecordCfg { @@ -349,8 +347,6 @@ impl NetworkBuilder { .set_publication_interval(None) // 1mb packet size .set_max_packet_size(MAX_PACKET_SIZE) - // How many nodes _should_ store data. 
- .set_replication_factor(REPLICATION_FACTOR)
 .set_query_timeout(KAD_QUERY_TIMEOUT_S)
 // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes.
 .disjoint_query_paths(true)
@@ -429,9 +425,7 @@ impl NetworkBuilder {
 .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual)
 .set_max_packet_size(MAX_PACKET_SIZE)
 // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes.
- .disjoint_query_paths(true)
- // How many nodes _should_ store data.
- .set_replication_factor(REPLICATION_FACTOR);
+ .disjoint_query_paths(true);

 let (network, net_event_recv, driver) = self.build(
 kad_cfg,
@@ -697,6 +691,8 @@ impl NetworkBuilder {
 bad_nodes: Default::default(),
 quotes_history: Default::default(),
 replication_targets: Default::default(),
+ range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT),
+ first_contact_made: false,
 };

 let network = Network::new(
@@ -732,7 +728,7 @@ pub struct SwarmDriver {
 pub(crate) local_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
 local_cmd_receiver: mpsc::Receiver<LocalSwarmCmd>,
 network_cmd_receiver: mpsc::Receiver<NetworkSwarmCmd>,
- event_sender: mpsc::Sender<NetworkEvent>, // Use `self.send_event()` to send a NetworkEvent.
+ pub(crate) event_sender: mpsc::Sender<NetworkEvent>, // Use `self.send_event()` to send a NetworkEvent.

 /// Trackers for underlying behaviour related events
 pub(crate) pending_get_closest_peers: PendingGetClosest,
@@ -755,6 +751,13 @@ pub struct SwarmDriver {
 pub(crate) bad_nodes: BadNodes,
 pub(crate) quotes_history: BTreeMap<PeerId, PaymentQuote>,
 pub(crate) replication_targets: BTreeMap<PeerId, Instant>,
+
+ // The recent range_distances calculated by the node
+ // Each update is generated when there is a routing table change
+ // We use the median of these GET_RANGE_STORAGE_LIMIT values as our get-range distance.
+ pub(crate) range_distances: VecDeque<KBucketDistance>,
+ // have we found our initial peer
+ pub(crate) first_contact_made: bool,
 }

 impl SwarmDriver {
@@ -805,28 +808,24 @@ impl SwarmDriver {
 // logging for handling events happens inside handle_swarm_events
 // otherwise we're rewriting match statements etc around this anwyay
 if let Err(err) = self.handle_swarm_events(swarm_event) {
- warn!("Error while handling swarm event: {err}");
+ warn!("Issue while handling swarm event: {err}");
 }
 },
 // thereafter we can check our intervals
 // runs every bootstrap_interval time
 _ = bootstrap_interval.tick() => {
- if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await {
- bootstrap_interval = new_interval;
- }
+ self.run_bootstrap_continuously();
 }
 _ = set_farthest_record_interval.tick() => {
 if !self.is_client {
- let closest_k_peers = self.get_closest_k_value_local_peers();
-
- if let Some(distance) = self.get_responsbile_range_estimate(&closest_k_peers) {
- info!("Set responsible range to {distance}");
- // set any new distance to farthest record in the store
- self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(distance);
- // the distance range within the replication_fetcher shall be in sync as well
- self.replication_fetcher.set_replication_distance_range(distance);
- }
+ let get_range = self.get_request_range();
+ self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(get_range);
+
+ // the distance range within the replication_fetcher shall be in sync as well
+ self.replication_fetcher.set_replication_distance_range(get_range);
+
+
 }
 }
 _ = relay_manager_reservation_interval.tick() => self.relay_manager.try_connecting_to_relay(&mut self.swarm, &self.bad_nodes),
@@ -838,32 +837,90 @@ impl SwarmDriver {
 // ---------- Crate helpers -------------------
 // --------------------------------------------

- /// Uses the closest k peers to estimate the farthest address as
- /// `K_VALUE / 2`th peer's bucket.
- fn get_responsbile_range_estimate(
+ /// Defines a new X distance range to be used for GETs and data replication
+ ///
+ /// Sorts the passed peers by distance to the queried address and records a
+ /// yardstick distance drawn from that sorted list.
+ ///
+ pub(crate) fn set_request_range(
 &mut self,
- // Sorted list of closest k peers to our peer id.
- closest_k_peers: &[PeerId],
- ) -> Option<u32> {
- // if we don't have enough peers we don't set the distance range yet.
- let mut farthest_distance = None; - - if closest_k_peers.is_empty() { - return farthest_distance; + queried_address: NetworkAddress, + network_discovery_peers: &[PeerId], + ) { + info!( + "Adding a GetRange to our stash deriving from {:?} peers", + network_discovery_peers.len() + ); + + let sorted_distances = sort_peers_by_distance_to(network_discovery_peers, queried_address); + + let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect(); + info!("Sorted distances: {:?}", mapped); + + let farthest_peer_to_check = self + .get_all_local_peers_excluding_self() + .len() + .checked_div(3 * CLOSE_GROUP_SIZE) + .unwrap_or(1); + + info!("Farthest peer we'll check: {:?}", farthest_peer_to_check); + + let yardstick = if sorted_distances.len() >= farthest_peer_to_check { + sorted_distances.get(farthest_peer_to_check.saturating_sub(1)) + } else { + sorted_distances.last() + }; + if let Some(distance) = yardstick { + if self.range_distances.len() >= GET_RANGE_STORAGE_LIMIT { + if let Some(distance) = self.range_distances.pop_front() { + trace!("Removed distance range: {:?}", distance.ilog2()); + } + } + + info!("Adding new distance range: {:?}", distance.ilog2()); + + self.range_distances.push_back(*distance); } - let our_address = NetworkAddress::from_peer(self.self_peer_id); + info!( + "Distance between peers in set_request_range call: {:?}", + yardstick + ); + } + + /// Returns the KBucketDistance we are currently using as our X value + /// for range based search. + pub(crate) fn get_request_range(&self) -> KBucketDistance { + let mut sorted_distances = self.range_distances.iter().collect::>(); - // get `K_VALUE / 2`th peer's address distance - // This is a rough estimate of the farthest address we might be responsible for. - // We want this to be higher than actually necessary, so we retain more data - // and can be sure to pass bad node checks - let target_index = std::cmp::min(K_VALUE.get() / 2, closest_k_peers.len()) - 1; + sorted_distances.sort_unstable(); - let address = NetworkAddress::from_peer(closest_k_peers[target_index]); - farthest_distance = our_address.distance(&address).ilog2(); + let median_index = sorted_distances.len() / 2; - farthest_distance + let default = KBucketDistance::default(); + let median = sorted_distances.get(median_index).cloned(); + + if let Some(dist) = median { + *dist + } else { + default + } + } + + /// get all the peers from our local RoutingTable. 
Excluding self + pub(crate) fn get_all_local_peers_excluding_self(&mut self) -> Vec { + let our_peer_id = self.self_peer_id; + let mut all_peers: Vec = vec![]; + for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { + for entry in kbucket.iter() { + let id = entry.node.key.into_preimage(); + + if id != our_peer_id { + all_peers.push(id); + } + } + } + all_peers } /// Pushes NetworkSwarmCmd off thread so as to be non-blocking diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 6534c84017..99bf1fbe92 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -30,10 +30,11 @@ pub(super) type Result = std::result::Result; #[derive(Error, Clone)] pub enum GetRecordError { #[error("Get Record completed with non enough copies")] - NotEnoughCopies { + NotEnoughCopiesInRange { record: Record, expected: usize, got: usize, + range: u32, }, #[error("Record not found in the network")] @@ -55,16 +56,18 @@ pub enum GetRecordError { impl Debug for GetRecordError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::NotEnoughCopies { + Self::NotEnoughCopiesInRange { record, expected, got, + range, } => { let pretty_key = PrettyPrintRecordKey::from(&record.key); - f.debug_struct("NotEnoughCopies") + f.debug_struct("NotEnoughCopiesInRange") .field("record_key", &pretty_key) .field("expected", &expected) .field("got", &got) + .field("range", &range) .finish() } Self::RecordNotFound => write!(f, "RecordNotFound"), @@ -122,9 +125,6 @@ pub enum NetworkError { #[error("The RecordKind obtained from the Record did not match with the expected kind: {0}")] RecordKindMismatch(RecordKind), - #[error("Record header is incorrect")] - InCorrectRecordHeader, - // ---------- Transfer Errors #[error("Failed to get spend: {0}")] FailedToGetSpend(String), @@ -138,7 +138,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double spend(s) attempt was detected. The signed spends are: {0:?}")] + #[error("Double SpendAttempt was detected. The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 6551f6e5f0..88a2a7ffca 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -7,21 +7,26 @@ // permissions and limitations relating to use of the SAFE Network Software. 
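The `NotEnoughCopies` to `NotEnoughCopiesInRange` rename above also threads the search range into the error: the new `range` field carries the ilog2 bucket index in force when the query gave up. A caller-side sketch of how that extra field can be used (illustrative; it mirrors the retry logging updated in sn_networking/src/lib.rs later in this patch):

```rust
// Illustrative only: report how much of the address space a failed
// fetch actually covered before retrying.
fn log_fetch_outcome(outcome: &Result<Record, GetRecordError>) {
    match outcome {
        Err(GetRecordError::NotEnoughCopiesInRange { expected, got, range, .. }) => {
            // `range` is the ilog2 bucket index the search was expected to reach.
            warn!("Only {got}/{expected} copies found within ilog2 range {range}");
        }
        Err(other) => warn!("Fetch failed: {other:?}"),
        Ok(_) => (),
    }
}
```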
use crate::{ - driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record, - target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, - CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, GetRecordCfg, + GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE, }; use itertools::Itertools; -use libp2p::kad::{ - self, GetClosestPeersError, InboundRequest, PeerRecord, ProgressStep, QueryId, QueryResult, - QueryStats, Record, K_VALUE, +use libp2p::{ + kad::{ + self, GetClosestPeersError, InboundRequest, KBucketDistance, PeerRecord, ProgressStep, + QueryId, QueryResult, QueryStats, Quorum, Record, K_VALUE, + }, + PeerId, }; use sn_protocol::{ - storage::{try_serialize_record, RecordKind}, - PrettyPrintRecordKey, + messages::{Cmd, Request}, + storage::get_type_from_record, + NetworkAddress, PrettyPrintRecordKey, +}; +use std::{ + collections::{hash_map::Entry, HashSet}, + time::Instant, }; -use sn_transfers::SignedSpend; -use std::collections::{hash_map::Entry, BTreeSet, HashSet}; use tokio::sync::oneshot; use xor_name::XorName; @@ -31,6 +36,9 @@ impl SwarmDriver { let event_string; match kad_event { + // We use this query both to bootstrap and populate our routing table, + // but also to define our GetRange as defined by the largest distance between + // peers in any recent GetClosest call. kad::Event::OutboundQueryProgressed { id, result: QueryResult::GetClosestPeers(Ok(ref closest_peers)), @@ -45,7 +53,7 @@ impl SwarmDriver { ); if let Entry::Occupied(mut entry) = self.pending_get_closest_peers.entry(id) { - let (_, current_closest) = entry.get_mut(); + let (_, _, current_closest) = entry.get_mut(); // TODO: consider order the result and terminate when reach any of the // following criteria: @@ -53,16 +61,19 @@ impl SwarmDriver { // 2, `stats.duration()` is longer than a defined period current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id)); if current_closest.len() >= usize::from(K_VALUE) || step.last { - let (get_closest_type, current_closest) = entry.remove(); - match get_closest_type { - PendingGetClosestType::NetworkDiscovery => self - .network_discovery - .handle_get_closest_query(current_closest), - PendingGetClosestType::FunctionCall(sender) => { - sender - .send(current_closest) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } + let (address, get_closest_type, current_closest) = entry.remove(); + self.network_discovery + .handle_get_closest_query(¤t_closest); + + if let PendingGetClosestType::FunctionCall(sender) = get_closest_type { + sender + .send(current_closest) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } else { + // do not set this via function calls, as that could potentially + // skew the results in favour of heavily queried (and manipulated) + // areas of the network + self.set_request_range(address, ¤t_closest); } } } else { @@ -81,9 +92,8 @@ impl SwarmDriver { ref step, } => { event_string = "kad_event::get_closest_peers_err"; - error!("GetClosest Query task {id:?} errored with {err:?}, {stats:?} - {step:?}"); - let (get_closest_type, mut current_closest) = + let (address, get_closest_type, mut current_closest) = self.pending_get_closest_peers.remove(&id).ok_or_else(|| { debug!( "Can't locate query task {id:?}, it has likely been completed already." @@ -100,13 +110,23 @@ impl SwarmDriver { match err { GetClosestPeersError::Timeout { ref peers, .. 
} => { current_closest.extend(peers.iter().map(|i| i.peer_id)); + if current_closest.len() < CLOSE_GROUP_SIZE { + error!( + "GetClosest Query task {id:?} errored, not enough found. {err:?}, {stats:?} - {step:?}" + ); + } } } match get_closest_type { - PendingGetClosestType::NetworkDiscovery => self - .network_discovery - .handle_get_closest_query(current_closest), + PendingGetClosestType::NetworkDiscovery => { + // do not set this via function calls, as that could potentially + // skew the results in favour of heavily queried (and manipulated) + // areas of the network + self.set_request_range(address, ¤t_closest); + self.network_discovery + .handle_get_closest_query(¤t_closest); + } PendingGetClosestType::FunctionCall(sender) => { sender .send(current_closest) @@ -127,7 +147,7 @@ impl SwarmDriver { PrettyPrintRecordKey::from(&peer_record.record.key), peer_record.peer ); - self.accumulate_get_record_found(id, peer_record, stats, step)?; + self.accumulate_get_record_found(id, peer_record)?; } kad::Event::OutboundQueryProgressed { id, @@ -248,12 +268,13 @@ impl SwarmDriver { event_string = "kad_event::RoutingUpdated"; if is_new_peer { self.update_on_peer_addition(peer); + } + if !self.first_contact_made { // This should only happen once - if self.bootstrap.notify_new_peer() { - info!("Performing the first bootstrap"); - self.trigger_network_discovery(); - } + self.first_contact_made = true; + info!("Performing the first bootstrap"); + self.trigger_network_discovery(); } info!("kad_event::RoutingUpdated {:?}: {peer:?}, is_new_peer: {is_new_peer:?} old_peer: {old_peer:?}", self.peers_in_rt); @@ -320,6 +341,7 @@ impl SwarmDriver { // `QueryStats::requests` to be 20 (K-Value) // `QueryStats::success` to be over majority of the requests // `err::NotFound::closest_peers` contains a list of CLOSE_GROUP_SIZE peers + // // 2, targeting an existing entry // there will a sequence of (at least CLOSE_GROUP_SIZE) events of // `kad::Event::OutboundQueryProgressed` to be received @@ -333,26 +355,30 @@ impl SwarmDriver { // where: `cache_candidates`: being the peers supposed to hold the record but not // `ProgressStep::count`: to be `number of received copies plus one` // `ProgressStep::last` to be `true` + // + // /// Accumulates the GetRecord query results - /// If we get enough responses (quorum) for a record with the same content hash: + /// If we get enough responses (ie exceed GetRange) for a record with the same content hash: /// - we return the Record after comparing with the target record. This might return RecordDoesNotMatch if the /// check fails. /// - if multiple content hashes are found, we return a SplitRecord Error /// And then we stop the kad query as we are done here. + /// We do not need to wait for GetRange to be exceeded here and should return early. fn accumulate_get_record_found( &mut self, query_id: QueryId, peer_record: PeerRecord, - _stats: QueryStats, - step: ProgressStep, ) -> Result<()> { + let expected_get_range = self.get_request_range(); + let key = peer_record.record.key.clone(); + let peer_id = if let Some(peer_id) = peer_record.peer { peer_id } else { self.self_peer_id }; - let pretty_key = PrettyPrintRecordKey::from(&peer_record.record.key).into_owned(); + let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); if let Entry::Occupied(mut entry) = self.pending_get_record.entry(query_id) { let (_key, _senders, result_map, cfg) = entry.get_mut(); @@ -367,92 +393,97 @@ impl SwarmDriver { // Insert the record and the peer into the result_map. 
let record_content_hash = XorName::from_content(&peer_record.record.value); - let responded_peers = + + let peer_list = if let Entry::Occupied(mut entry) = result_map.entry(record_content_hash) { let (_, peer_list) = entry.get_mut(); + let _ = peer_list.insert(peer_id); - peer_list.len() + peer_list.clone() } else { let mut peer_list = HashSet::new(); let _ = peer_list.insert(peer_id); - result_map.insert(record_content_hash, (peer_record.record.clone(), peer_list)); - 1 + result_map.insert( + record_content_hash, + (peer_record.record.clone(), peer_list.clone()), + ); + + peer_list }; + let responded_peers = peer_list.len(); + let expected_answers = get_quorum_value(&cfg.get_quorum); + trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); + // return error if the entry cannot be found + return Err(NetworkError::ReceivedKademliaEventDropped { + query_id, + event: format!("Accumulate Get Record of {pretty_key:?}"), + }); + } + Ok(()) + } - debug!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); + /// Checks passed peers from a request and checks they are sufficiently spaced to + /// ensure we have searched enough of the network range as determined by our `get_range` + /// + /// We expect any conflicting records to have been reported prior to this check, + /// so we assume we're returning unique records only. + fn have_we_have_searched_thoroughly_for_quorum( + expected_get_range: KBucketDistance, + searched_peers_list: &HashSet, + data_key_address: &NetworkAddress, + quorum: &Quorum, + ) -> bool { + info!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len()); + let is_sensitive_data = matches!(quorum, Quorum::All); + + let required_quorum = get_quorum_value(quorum); + + let met_quorum = searched_peers_list.len() >= required_quorum; + + // we only enforce range if we have sensitive data...for data spends quorum::all + if met_quorum && !is_sensitive_data { + return true; + } - if responded_peers >= expected_answers { - if !cfg.expected_holders.is_empty() { - debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with non-responded expected holders {:?}", cfg.expected_holders); - } - let cfg = cfg.clone(); + // get the farthest distance between peers in the response + let mut max_distance_to_data_from_responded_nodes = KBucketDistance::default(); - // Remove the query task and consume the variables. 
- let (_key, senders, result_map, _) = entry.remove();
+
+ // iterate over peers and see if the distance to the data is greater than the get_range
+ for peer_id in searched_peers_list.iter() {
+ let peer_address = NetworkAddress::from_peer(*peer_id);
+ let distance_to_data = peer_address.distance(data_key_address);
+ if max_distance_to_data_from_responded_nodes < distance_to_data {
+ max_distance_to_data_from_responded_nodes = distance_to_data;
+ }
+ }

- if result_map.len() == 1 {
- Self::send_record_after_checking_target(senders, peer_record.record, &cfg)?;
- } else {
- debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with split record");
- let mut accumulated_spends = BTreeSet::new();
- for (record, _) in result_map.values() {
- match get_raw_signed_spends_from_record(record) {
- Ok(spends) => {
- accumulated_spends.extend(spends);
- }
- Err(_) => {
- continue;
- }
- }
- }
- if !accumulated_spends.is_empty() {
- info!("For record {pretty_key:?} task {query_id:?}, found split record for a spend, accumulated and sending them as a single record");
- let accumulated_spends =
- accumulated_spends.into_iter().collect::<Vec<SignedSpend>>();
-
- let bytes = try_serialize_record(&accumulated_spends, RecordKind::Spend)?;
-
- let new_accumulated_record = Record {
- key: peer_record.record.key,
- value: bytes.to_vec(),
- publisher: None,
- expires: None,
- };
- for sender in senders {
- let new_accumulated_record = new_accumulated_record.clone();
+ // use ilog2 as simplified distance check
+ // It allows us to say "we've searched up to and including this bucket"
+ // as opposed to the concrete distance itself (which statistically seems like we can fall outwith a range
+ // quite easily with a small number of peers)
+ let exceeded_request_range = if max_distance_to_data_from_responded_nodes.ilog2()
+ < expected_get_range.ilog2()
+ {
+ let dist = max_distance_to_data_from_responded_nodes.ilog2();
+ let expected_dist = expected_get_range.ilog2();

- sender
- .send(Ok(new_accumulated_record))
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
- }
- } else {
- for sender in senders {
- let result_map = result_map.clone();
- sender
- .send(Err(GetRecordError::SplitRecord { result_map }))
- .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
- }
- }
- }
+ warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {max_distance_to_data_from_responded_nodes:?} is less than expected GetRange of {expected_get_range:?}");

- // Stop the query; possibly stops more nodes from being queried.
- if let Some(mut query) = self.swarm.behaviour_mut().kademlia.query_mut(&query_id) {
- query.finish();
- }
- } else if usize::from(step.count) >= CLOSE_GROUP_SIZE {
- debug!("For record {pretty_key:?} task {query_id:?}, got {:?} with {} versions so far.",
- step.count, result_map.len());
- }
+ false
 } else {
- // return error if the entry cannot be found
- return Err(NetworkError::ReceivedKademliaEventDropped {
- query_id,
- event: format!("Accumulate Get Record of {pretty_key:?}"),
- });
+ true
+ };
+
+ // We assume a finalised query has searched as far as it can in libp2p
+
+ if exceeded_request_range && met_quorum {
+ warn!("RANGE: {data_key_address:?} Request satisfied as exceeded request range : {exceeded_request_range:?} and Quorum satisfied with {:?} peers exceeding quorum {required_quorum:?}", searched_peers_list.len());
+ return true;
 }
- Ok(())
+
+ false
 }

 /// Handles the possible cases when a GetRecord Query completes.
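Reduced to integers, the check above compares ilog2 bucket indices rather than raw 256-bit distances. A self-contained sketch of the decision rule (illustrative only; the real code works on `KBucketDistance` values and also short-circuits when quorum is met on non-sensitive data):

```rust
/// Simplified decision rule: for Quorum::All data we require both quorum and
/// responders at least as far out as the expected get-range bucket.
fn searched_thoroughly(expected_range_ilog2: u32, farthest_responder_ilog2: u32, met_quorum: bool) -> bool {
    let exceeded_range = farthest_responder_ilog2 >= expected_range_ilog2;
    met_quorum && exceeded_range
}

#[test]
fn quorum_alone_is_not_enough() {
    // Quorum met, but the farthest responder sits two buckets short of the range.
    assert!(!searched_thoroughly(250, 248, true));
    // Quorum met and the search reached past the expected range.
    assert!(searched_thoroughly(250, 251, true));
}
```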
@@ -469,16 +500,92 @@
 let (result, log_string) = if let Some((record, from_peers)) =
 result_map.values().next()
 {
- let result = if num_of_versions == 1 {
- Err(GetRecordError::NotEnoughCopies {
- record: record.clone(),
- expected: get_quorum_value(&cfg.get_quorum),
- got: from_peers.len(),
- })
- } else {
+ let data_key_address = NetworkAddress::from_record_key(&record.key);
+ let expected_get_range = self.get_request_range();
+
+ let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum(
+ expected_get_range,
+ from_peers,
+ &data_key_address,
+ &cfg.get_quorum,
+ );
+
+ let pretty_key = PrettyPrintRecordKey::from(&record.key);
+ info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}");
+
+ let result = if num_of_versions > 1 {
+ warn!("RANGE: more than one version found!");
 Err(GetRecordError::SplitRecord {
 result_map: result_map.clone(),
 })
+ } else if we_have_searched_thoroughly {
+ warn!("RANGE: Get record finished: {pretty_key:?} Enough of the network has responded or it's not sensitive data... and we only have one copy...");
+
+ Ok(record.clone())
+ } else {
+ // We have not searched enough of the network range.
+ let result = Err(GetRecordError::NotEnoughCopiesInRange {
+ record: record.clone(),
+ expected: get_quorum_value(&cfg.get_quorum),
+ got: from_peers.len(),
+ range: expected_get_range.ilog2().unwrap_or(0),
+ });
+
+ // This should be a backstop... Quorum::All is the only one that enforces
+ // a full search of the network range.
+ if matches!(cfg.get_quorum, Quorum::All) {
+ warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to extend the range and PUT the data. {result:?}");
+
+ warn!("Reputting data to network {pretty_key:?}...");
+
+ // let's ensure we have an updated network view
+ self.trigger_network_discovery();
+
+ warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to PUT the data back into nodes in that range.");
+
+ let record_type = get_type_from_record(record)?;
+
+ let replicate_targets: HashSet<_> = self
+ .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address)
+ .iter()
+ .cloned()
+ .collect();
+
+ if from_peers == &replicate_targets {
+ warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!");
+ }
+
+ // set holder to someone that has the data
+ let holder = NetworkAddress::from_peer(
+ from_peers
+ .iter()
+ .next()
+ .cloned()
+ .unwrap_or(self.self_peer_id),
+ );
+
+ for peer in replicate_targets {
+ warn!("Reputting data to {peer:?} for {pretty_key:?} if needed...");
+ // Do not send to any peer that has already informed us
+ if from_peers.contains(&peer) {
+ continue;
+ }
+
+ debug!("RANGE: (insufficient, so) sending data to unresponded peer: {peer:?} for {pretty_key:?}");
+
+ // nodes will try/fail to replicate it from us, but grab it from the network thereafter
+ self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest {
+ req: Request::Cmd(Cmd::Replicate {
+ holder: holder.clone(),
+ keys: vec![(data_key_address.clone(), record_type.clone())],
+ }),
+ peer,
+ sender: None,
+ });
+ }
+ }
+
+ result
 };

 (
@@ -508,8 +615,6 @@
 .map_err(|_| NetworkError::InternalMsgChannelDropped)?;
 }
 } else {
- // We manually perform `query.finish()` if we return early from accumulate fn.
- // Thus we will still get FinishedWithNoAdditionalRecord.
 debug!("Can't locate query task {query_id:?} during GetRecord finished.
We might have already returned the result to the sender."); } Ok(()) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 4550772bf4..ca6808ed1b 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -7,17 +7,21 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address, MsgResponder, NetworkError, - NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address_and_limit, MsgResponder, + NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, }; -use itertools::Itertools; -use libp2p::request_response::{self, Message}; -use rand::{rngs::OsRng, thread_rng, Rng}; +use libp2p::{ + kad::RecordKey, + request_response::{self, Message}, + PeerId, +}; +use rand::{rngs::OsRng, Rng}; use sn_protocol::{ messages::{CmdResponse, Request, Response}, storage::RecordType, NetworkAddress, }; +use std::collections::HashMap; impl SwarmDriver { /// Forwards `Request` to the upper layers using `Sender`. Sends `Response` to the peers @@ -190,6 +194,9 @@ impl SwarmDriver { sender: NetworkAddress, incoming_keys: Vec<(NetworkAddress, RecordType)>, ) { + let peers = self.get_all_local_peers_excluding_self(); + let our_peer_id = self.self_peer_id; + let holder = if let Some(peer_id) = sender.as_peer_id() { peer_id } else { @@ -202,16 +209,12 @@ impl SwarmDriver { incoming_keys.len() ); - // accept replication requests from the K_VALUE peers away, - // giving us some margin for replication - let closest_k_peers = self.get_closest_k_value_local_peers(); - if !closest_k_peers.contains(&holder) || holder == self.self_peer_id { - debug!("Holder {holder:?} is self or not in replication range."); + // accept replication requests from all peers known peers within our GetRange + if !peers.contains(&holder) || holder == our_peer_id { + trace!("Holder {holder:?} is self or not in replication range."); return; } - let more_than_one_key = incoming_keys.len() > 1; - // On receive a replication_list from a close_group peer, we undertake two tasks: // 1, For those keys that we don't have: // fetch them if close enough to us @@ -224,81 +227,94 @@ impl SwarmDriver { .behaviour_mut() .kademlia .store_mut() - .record_addresses_ref(); - let keys_to_fetch = self - .replication_fetcher - .add_keys(holder, incoming_keys, all_keys); + .record_addresses_ref() + .clone(); + + let keys_to_fetch = + self.replication_fetcher + .add_keys(holder, incoming_keys, &all_keys, &peers); + if keys_to_fetch.is_empty() { debug!("no waiting keys to fetch from the network"); } else { self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch)); } - // Only trigger chunk_proof check based every X% of the time - let mut rng = thread_rng(); - // 5% probability - if more_than_one_key && rng.gen_bool(0.05) { - self.verify_peer_storage(sender.clone()); + let event_sender = self.event_sender.clone(); + let _handle = tokio::spawn(async move { + let keys_to_verify = + Self::select_verification_data_candidates(&peers, &all_keys, &sender); + + if keys_to_verify.is_empty() { + debug!("No valid candidate to be checked against peer {holder:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } // In additon to verify the sender, we also verify a 
random close node. // This is to avoid malicious node escaping the check by never send a replication_list. // With further reduced probability of 1% (5% * 20%) - if rng.gen_bool(0.2) { - let close_group_peers = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&self.self_peer_id.into()) - .map(|peer| peer.into_preimage()) - .take(CLOSE_GROUP_SIZE) - .collect_vec(); - if close_group_peers.len() == CLOSE_GROUP_SIZE { - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate = NetworkAddress::from_peer(close_group_peers[index]); - if sender != candidate { - self.verify_peer_storage(candidate); - break; - } + let close_group_peers = sort_peers_by_address_and_limit( + &peers, + &NetworkAddress::from_peer(our_peer_id), + CLOSE_GROUP_SIZE, + ) + .unwrap_or_default(); + + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate_peer_id = *close_group_peers[index]; + let candidate = NetworkAddress::from_peer(*close_group_peers[index]); + if sender != candidate { + let keys_to_verify = + Self::select_verification_data_candidates(&peers, &all_keys, &candidate); + + if keys_to_verify.is_empty() { + debug!("No valid candidate to be checked against peer {candidate:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: candidate_peer_id, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); } + + break; } } - } + }); } /// Check among all chunk type records that we have, select those close to the peer, /// and randomly pick one as the verification candidate. - fn verify_peer_storage(&mut self, peer: NetworkAddress) { - let mut closest_peers = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&self.self_peer_id.into()) - .map(|peer| peer.into_preimage()) - .take(20) - .collect_vec(); - closest_peers.push(self.self_peer_id); - + fn select_verification_data_candidates( + all_peers: &Vec, + all_keys: &HashMap, + peer: &NetworkAddress, + ) -> Vec { let target_peer = if let Some(peer_id) = peer.as_peer_id() { peer_id } else { error!("Target {peer:?} is not a valid PeerId"); - return; + return vec![]; }; - let all_keys = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .record_addresses_ref(); - // Targeted chunk type record shall be expected within the close range from our perspective. let mut verify_candidates: Vec = all_keys .values() .filter_map(|(addr, record_type)| { if RecordType::Chunk == *record_type { - match sort_peers_by_address(&closest_peers, addr, CLOSE_GROUP_SIZE) { + // Here we take the actual closest, as this is where we want to be + // strict about who does have the data... + match sort_peers_by_address_and_limit(all_peers, addr, CLOSE_GROUP_SIZE) { Ok(close_group) => { if close_group.contains(&&target_peer) { Some(addr.clone()) @@ -319,17 +335,6 @@ impl SwarmDriver { verify_candidates.sort_by_key(|a| peer.distance(a)); - // To ensure the candidate mush have to be held by the peer, - // we only carry out check when there are already certain amount of chunks uploaded - // AND choose candidate from certain reduced range. 
- if verify_candidates.len() > 50 { - let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2)); - self.send_event(NetworkEvent::ChunkProofVerification { - peer_id: target_peer, - keys_to_verify: vec![verify_candidates[index].clone()], - }); - } else { - debug!("No valid candidate to be checked against peer {peer:?}"); - } + verify_candidates } } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 982088f102..2416b5681c 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, - target_arch::Instant, NetworkEvent, Result, SwarmDriver, + cmd::NetworkSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, + relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; #[cfg(feature = "local")] use libp2p::mdns; @@ -25,7 +25,7 @@ use libp2p::{ }; use sn_protocol::version::{IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR}; use std::collections::HashSet; -use tokio::time::Duration; +use tokio::{sync::oneshot, time::Duration}; impl SwarmDriver { /// Handle `SwarmEvents` @@ -244,7 +244,7 @@ impl SwarmDriver { } // If we are not local, we care only for peers that we dialed and thus are reachable. - if self.local || has_dialed { + if !self.local && has_dialed { // A bad node cannot establish a connection with us. So we can add it to the RT directly. self.remove_bootstrap_from_full(peer_id); @@ -254,7 +254,10 @@ impl SwarmDriver { multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit)) }); } + } + if self.local || has_dialed { + // If we are not local, we care only for peers that we dialed and thus are reachable. debug!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table"); // Attempt to add the addresses to the routing table. 
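The identify change above is easy to misread in diff form: the old predicate `self.local || has_dialed` ran the remote-only bookkeeping (bad-node removal, relay-address filtering) even on local networks, while routing-table insertion genuinely wants the looser condition. A sketch of the two gates after the fix (illustrative booleans, not the patch's exact code):

```rust
/// Gating after the fix (sketch): the first flag guards bad-node bookkeeping
/// and relay-address filtering, the second guards routing-table insertion.
fn identify_gates(local: bool, has_dialed: bool) -> (bool, bool) {
    let run_remote_bookkeeping = !local && has_dialed; // previously `local || has_dialed`
    let try_add_to_routing_table = local || has_dialed;
    (run_remote_bookkeeping, try_add_to_routing_table)
}
```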
@@ -392,6 +395,7 @@ impl SwarmDriver { let _ = self.live_connected_peers.remove(&connection_id); self.record_connection_metrics(); + let mut failed_peer_addresses = vec![]; // we need to decide if this was a critical error and the peer should be removed from the routing table let should_clean_peer = match error { DialError::Transport(errors) => { @@ -401,10 +405,14 @@ impl SwarmDriver { // so we default to it not being a real issue // unless there are _specific_ errors (connection refused eg) error!("Dial errors len : {:?}", errors.len()); - let mut there_is_a_serious_issue = false; - for (_addr, err) in errors { + let mut remove_peer_track_peer_issue = false; + for (addr, err) in errors { error!("OutgoingTransport error : {err:?}"); + if !failed_peer_addresses.contains(&addr) { + failed_peer_addresses.push(addr) + } + match err { TransportError::MultiaddrNotSupported(addr) => { warn!("Multiaddr not supported : {addr:?}"); @@ -414,14 +422,13 @@ impl SwarmDriver { println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); } // if we can't dial a peer on a given address, we should remove it from the routing table - there_is_a_serious_issue = true + remove_peer_track_peer_issue = false } TransportError::Other(err) => { - let problematic_errors = [ - "ConnectionRefused", - "HostUnreachable", - "HandshakeTimedOut", - ]; + let problematic_errors = + ["ConnectionRefused", "HostUnreachable"]; + + let intermittent_errors = ["HandshakeTimedOut"]; let is_bootstrap_peer = self .bootstrap_peers @@ -432,7 +439,7 @@ impl SwarmDriver { && self.peers_in_rt < self.bootstrap_peers.len() { warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring"); - there_is_a_serious_issue = false; + remove_peer_track_peer_issue = false; } else { // It is really difficult to match this error, due to being eg: // Custom { kind: Other, error: Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) } @@ -443,13 +450,19 @@ impl SwarmDriver { .any(|err| error_msg.contains(err)) { warn!("Problematic error encountered: {error_msg}"); - there_is_a_serious_issue = true; + remove_peer_track_peer_issue = true; + } else if intermittent_errors + .iter() + .any(|err| error_msg.contains(err)) + { + warn!("Intermittent error encountered: {error_msg}"); + remove_peer_track_peer_issue = false; } } } } } - there_is_a_serious_issue + remove_peer_track_peer_issue } DialError::NoAddresses => { // We provided no address, and while we can't really blame the peer @@ -490,7 +503,7 @@ impl SwarmDriver { }; if should_clean_peer { - warn!("Tracking issue of {failed_peer_id:?}. Clearing it out for now"); + warn!("Serious issue with {failed_peer_id:?}. Clearing it out for now"); if let Some(dead_peer) = self .swarm @@ -501,6 +514,15 @@ impl SwarmDriver { self.update_on_peer_removal(*dead_peer.node.key.preimage()); } } + + if !should_clean_peer { + // lets try and redial. + for addr in failed_peer_addresses { + let (sender, _recv) = oneshot::channel(); + + self.queue_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); + } + } } SwarmEvent::IncomingConnectionError { connection_id, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 27f07bdb3e..c9244dbc46 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -78,10 +78,6 @@ use tokio::time::Duration; /// The type of quote for a selected payee. 
pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote);

-/// The count of peers that will be considered as close to a record target,
-/// that a replication of the record shall be sent/accepted to/by the peer.
-pub const REPLICATION_PEERS_COUNT: usize = CLOSE_GROUP_SIZE + 2;
-
 /// Majority of a given group (i.e. > 1/2).
 #[inline]
 pub const fn close_group_majority() -> usize {
@@ -97,17 +93,47 @@ const MIN_WAIT_BEFORE_READING_A_PUT: Duration = Duration::from_millis(300);

 /// Sort the provided peers by their distance to the given `NetworkAddress`.
 /// Return with the closest expected number of entries if has.
-pub fn sort_peers_by_address<'a>(
+pub fn sort_peers_by_address_and_limit<'a>(
 peers: &'a Vec<PeerId>,
 address: &NetworkAddress,
 expected_entries: usize,
 ) -> Result<Vec<&'a PeerId>> {
- sort_peers_by_key(peers, &address.as_kbucket_key(), expected_entries)
+ sort_peers_by_key_and_limit(peers, &address.as_kbucket_key(), expected_entries)
+}
+
+/// Returns the distances from the provided peers to the given `NetworkAddress`,
+/// sorted in ascending order.
+pub fn sort_peers_by_distance_to(
+ peers: &[PeerId],
+ queried_address: NetworkAddress,
+) -> Vec<KBucketDistance> {
+ let mut sorted_distances: Vec<_> = peers
+ .iter()
+ .map(|peer| {
+ let addr = NetworkAddress::from_peer(*peer);
+ queried_address.distance(&addr)
+ })
+ .collect();
+
+ sorted_distances.sort();
+
+ sorted_distances
+}
+
+/// Sort the provided peers by their distance to the given `NetworkAddress`,
+/// keeping only those that lie within the given distance.
+#[allow(clippy::result_large_err)]
+pub fn sort_peers_by_address_and_limit_by_distance<'a>(
+ peers: &'a Vec<PeerId>,
+ address: &NetworkAddress,
+ distance: KBucketDistance,
+) -> Result<Vec<&'a PeerId>> {
+ limit_peers_by_distance(peers, &address.as_kbucket_key(), distance)
 }

 /// Sort the provided peers by their distance to the given `KBucketKey`.
 /// Return with the closest expected number of entries if has.
-pub fn sort_peers_by_key<'a, T>(
+pub fn sort_peers_by_key_and_limit<'a, T>(
 peers: &'a Vec<PeerId>,
 key: &KBucketKey<T>,
 expected_entries: usize,
@@ -144,6 +170,40 @@ pub fn sort_peers_by_key<'a, T>(
 Ok(sorted_peers)
 }

+/// Only return peers closer to the key than the provided distance.
+/// Their distance is measured by closeness to the given `KBucketKey`.
+#[allow(clippy::result_large_err)]
+pub fn limit_peers_by_distance<'a, T>(
+ peers: &'a Vec<PeerId>,
+ key: &KBucketKey<T>,
+ distance: KBucketDistance,
+) -> Result<Vec<&'a PeerId>> {
+ // Check if there are enough peers to satisfy the request.
+ // bail early if that's not the case
+ if CLOSE_GROUP_SIZE > peers.len() {
+ warn!("Not enough peers in the k-bucket to satisfy the request");
+ return Err(NetworkError::NotEnoughPeers {
+ found: peers.len(),
+ required: CLOSE_GROUP_SIZE,
+ });
+ }
+
+ // Collect references to the peers whose distance to the key lies within the
+ // provided distance.
+ let mut peers_within_distance: Vec<&PeerId> = Vec::with_capacity(peers.len()); + + for peer_id in peers { + let addr = NetworkAddress::from_peer(*peer_id); + let peer_distance = key.distance(&addr.as_kbucket_key()); + + if peer_distance < distance { + peers_within_distance.push(peer_id); + } + } + + Ok(peers_within_distance) +} #[derive(Clone, Debug)] /// API to interact with the underlying Swarm @@ -197,6 +257,13 @@ impl Network { &self.inner.local_swarm_cmd_sender } + /// Return the GetRange as determined by the internal SwarmDriver + pub async fn get_range(&self) -> Result { + let (sender, receiver) = oneshot::channel(); + self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRange { sender }); + receiver.await.map_err(NetworkError::from) + } + /// Signs the given data with the node's keypair. pub fn sign(&self, msg: &[u8]) -> Result> { self.keypair().sign(msg).map_err(NetworkError::from) @@ -220,19 +287,121 @@ impl Network { receiver.await? } + /// Replicate a fresh record to its close group peers. + /// This should not be triggered by a record we receive via replicaiton fetch + pub async fn replicate_valid_fresh_record(&self, paid_key: RecordKey, record_type: RecordType) { + let network = self; + + let start = std::time::Instant::now(); + let pretty_key = PrettyPrintRecordKey::from(&paid_key); + + // first we wait until our own network store can return the record + // otherwise it may not be fully written yet + let mut retry_count = 0; + trace!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); + loop { + let record = match network.get_local_record(&paid_key).await { + Ok(record) => record, + Err(err) => { + error!( + "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" + ); + None + } + }; + + if record.is_some() { + break; + } + + if retry_count > 10 { + error!( + "Could not get record from store for replication: {pretty_key:?} after 10 retries" + ); + return; + } + + retry_count += 1; + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + trace!("Start replication of fresh record {pretty_key:?} from store"); + + let all_peers = match network.get_all_local_peers_excluding_self().await { + Ok(peers) => peers, + Err(err) => { + error!( + "Replicating fresh record {pretty_key:?} get_all_local_peers errored: {err:?}" + ); + return; + } + }; + + let data_addr = NetworkAddress::from_record_key(&paid_key); + let mut peers_to_replicate_to = match network.get_range().await { + Err(error) => { + error!("Replicating fresh record {pretty_key:?} get_range errored: {error:?}"); + + return; + } + + Ok(our_get_range) => { + match sort_peers_by_address_and_limit_by_distance( + &all_peers, + &data_addr, + our_get_range, + ) { + Ok(result) => result, + Err(err) => { + error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); + return; + } + } + } + }; + + if peers_to_replicate_to.len() < CLOSE_GROUP_SIZE { + warn!( + "Replicating fresh record {pretty_key:?} current GetRange insufficient for secure replication. 
Falling back to CLOSE_GROUP_SIZE" + ); + + peers_to_replicate_to = + match sort_peers_by_address_and_limit(&all_peers, &data_addr, CLOSE_GROUP_SIZE) { + Ok(result) => result, + Err(err) => { + error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); + return; + } + }; + } + + let our_peer_id = network.peer_id(); + let our_address = NetworkAddress::from_peer(our_peer_id); + #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress + let keys = vec![(data_addr.clone(), record_type.clone())]; + + for peer_id in &peers_to_replicate_to { + trace!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); + let request = Request::Cmd(Cmd::Replicate { + holder: our_address.clone(), + keys: keys.clone(), + }); + + network.send_req_ignore_reply(request, **peer_id); + } + trace!( + "Completed replicate fresh record {pretty_key:?} to {:?} peers on store, in {:?}", + peers_to_replicate_to.len(), + start.elapsed() + ); + } + /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. /// Excludes the client's `PeerId` while calculating the closest peers. pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result> { self.get_closest_peers(key, true).await } - /// Returns the closest peers to the given `NetworkAddress`, sorted by their distance to the key. - /// - /// Includes our node's `PeerId` while calculating the closest peers. - pub async fn node_get_closest_peers(&self, key: &NetworkAddress) -> Result> { - self.get_closest_peers(key, false).await - } - /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. /// Does not include self @@ -245,10 +414,10 @@ impl Network { } /// Returns all the PeerId from all the KBuckets from our local Routing Table - /// Also contains our own PeerId. - pub async fn get_closest_k_value_local_peers(&self) -> Result> { + /// Excludes our own PeerId. + pub async fn get_all_local_peers_excluding_self(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender }); receiver .await @@ -498,6 +667,10 @@ impl Network { key: RecordKey, cfg: &GetRecordCfg, ) -> Result { + use std::collections::BTreeSet; + + use sn_transfers::SignedSpend; + let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( backoff::ExponentialBackoff { @@ -528,7 +701,7 @@ impl Network { Err(GetRecordError::RecordDoesNotMatch(_)) => { warn!("The returned record does not match target {pretty_key:?}."); } - Err(GetRecordError::NotEnoughCopies { expected, got, .. }) => { + Err(GetRecordError::NotEnoughCopiesInRange { expected, got, .. }) => { warn!("Not enough copies ({got}/{expected}) found yet for {pretty_key:?}."); } // libp2p RecordNotFound does mean no holders answered. @@ -537,8 +710,39 @@ impl Network { Err(GetRecordError::RecordNotFound) => { warn!("No holder of record '{pretty_key:?}' found."); } - Err(GetRecordError::SplitRecord { .. 
}) => {
+                        Err(GetRecordError::SplitRecord { result_map }) => {
                             error!("Encountered a split record for {pretty_key:?}.");
+
+                            // attempt to deserialise and accumulate any spends
+                            let mut accumulated_spends = BTreeSet::new();
+                            let results_count = result_map.len();
+                            // try and accumulate any SpendAttempts
+                            if results_count > 1 {
+                                info!("For record {pretty_key:?}, we have more than one result returned.");
+                                // Allow for early bail if we've already seen a split SpendAttempt
+                                for (record, _) in result_map.values() {
+                                    match get_raw_signed_spends_from_record(record) {
+                                        Ok(spends) => {
+                                            accumulated_spends.extend(spends);
+                                        }
+                                        Err(_) => {
+                                            continue;
+                                        }
+                                    }
+                                }
+                            }
+
+                            // we have a Double SpendAttempt and will exit
+                            if accumulated_spends.len() > 1 {
+                                info!("For record {pretty_key:?}, accumulated conflicting spends from the split record; returning them as a double spend attempt");
+                                let accumulated_spends =
+                                    accumulated_spends.into_iter().collect::<Vec<SignedSpend>>();
+
+                                return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt(
+                                    accumulated_spends,
+                                )));
+                            }
+                        }
 
                         Err(GetRecordError::QueryTimeout) => {
                             error!("Encountered query timeout for {pretty_key:?}.");
@@ -903,7 +1107,7 @@ impl Network {
             debug!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}");
         }
 
-        let closest_peers = sort_peers_by_address(&closest_peers, key, CLOSE_GROUP_SIZE)?;
+        let closest_peers = sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?;
 
         Ok(closest_peers.into_iter().cloned().collect())
     }
diff --git a/sn_networking/src/network_discovery.rs b/sn_networking/src/network_discovery.rs
index f3f4986134..3d82c944fb 100644
--- a/sn_networking/src/network_discovery.rs
+++ b/sn_networking/src/network_discovery.rs
@@ -8,7 +8,6 @@
 
 use crate::target_arch::Instant;
 use libp2p::{kad::KBucketKey, PeerId};
-use rand::{thread_rng, Rng};
 use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use sn_protocol::NetworkAddress;
 use std::collections::{btree_map::Entry, BTreeMap};
@@ -52,13 +51,13 @@ impl NetworkDiscovery {
     }
 
     /// The results from `kad::GetClosestPeers` are used to update our kbuckets.
-    pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec<PeerId>) {
+    pub(crate) fn handle_get_closest_query(&mut self, closest_peers: &[PeerId]) {
         let now = Instant::now();
 
         let candidates_map: BTreeMap<u32, Vec<NetworkAddress>> = closest_peers
-            .into_iter()
+            .iter()
            .filter_map(|peer| {
-                let peer = NetworkAddress::from_peer(peer);
+                let peer = NetworkAddress::from_peer(*peer);
                 let peer_key = peer.as_kbucket_key();
                 peer_key
                     .distance(&self.self_key)
@@ -83,18 +82,28 @@ impl NetworkDiscovery {
 
     /// Returns one candidate per bucket, refreshing the candidate list once any bucket runs dry.
     /// Todo: Limit the candidates to return. Favor the closest buckets.
-    pub(crate) fn candidates(&mut self) -> Vec<&NetworkAddress> {
-        self.try_refresh_candidates();
-
-        let mut rng = thread_rng();
+    pub(crate) fn candidates(&mut self) -> Vec<NetworkAddress> {
         let mut op = Vec::with_capacity(self.candidates.len());
 
-        let candidates = self.candidates.values().filter_map(|candidates| {
-            // get a random index each time
-            let random_index = rng.gen::<usize>() % candidates.len();
-            candidates.get(random_index)
-        });
-        op.extend(candidates);
+        let mut generate_fresh_candidates = false;
+        for addresses in self.candidates.values_mut() {
+            // take the next candidate from each bucket, noting any empty buckets
+            if addresses.is_empty() {
+                generate_fresh_candidates = true;
+                continue;
+            }
+
+            // remove the first each time
+            let address = addresses.remove(0);
+            op.push(address);
+        }
+
+        if generate_fresh_candidates {
+            // we only refresh when we are running low on candidates
+            self.try_refresh_candidates();
+        }
+
+        debug!("Candidates returned: {}", op.len());
         op
     }
 
diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index ee4e413c5e..0551fc03f2 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -90,7 +90,7 @@ pub struct NodeRecordStore {
     /// Distance range of responsible records,
     /// i.e. how far out we consider data to be "close".
     /// None means accept all records.
-    responsible_distance_range: Option<u32>,
+    responsible_distance_range: Option<Distance>,
     #[cfg(feature = "open-metrics")]
     /// Used to report the number of records held by the store to the metrics server.
     record_count_metric: Option<Gauge>,
@@ -315,11 +315,6 @@ impl NodeRecordStore {
         self
     }
 
-    /// Returns the current distance ilog2 (aka bucket) range of CLOSE_GROUP nodes.
-    pub fn get_responsible_distance_range(&self) -> Option<u32> {
-        self.responsible_distance_range
-    }
-
     // Converts a Key into a Hex string.
     fn generate_filename(key: &Key) -> String {
         hex::encode(key.as_ref())
@@ -474,8 +469,7 @@ impl NodeRecordStore {
         let mut removed_keys = Vec::new();
         self.records.retain(|key, _val| {
             let kbucket_key = KBucketKey::new(key.to_vec());
-            let is_in_range =
-                responsible_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0);
+            let is_in_range = responsible_range >= self.local_key.distance(&kbucket_key);
             if !is_in_range {
                 removed_keys.push(key.clone());
             }
@@ -699,7 +693,7 @@ impl NodeRecordStore {
     pub fn get_records_within_distance_range(
         &self,
         records: HashSet<&Key>,
-        distance_range: u32,
+        distance_range: Distance,
     ) -> usize {
         debug!(
             "Total record count is {:?}. Distance is: {distance_range:?}",
@@ -710,7 +704,7 @@ impl NodeRecordStore {
             .iter()
             .filter(|key| {
                 let kbucket_key = KBucketKey::new(key.to_vec());
-                distance_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0)
+                distance_range >= self.local_key.distance(&kbucket_key)
             })
             .count();
 
@@ -719,8 +713,8 @@ impl NodeRecordStore {
     }
 
     /// Set up the distance range.
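     /// Records farther from the local key than this distance may be pruned as out of range.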
- pub(crate) fn set_responsible_distance_range(&mut self, farthest_responsible_bucket: u32) { - self.responsible_distance_range = Some(farthest_responsible_bucket); + pub(crate) fn set_responsible_distance_range(&mut self, farthest_distance: Distance) { + self.responsible_distance_range = Some(farthest_distance); } } @@ -1500,10 +1494,7 @@ mod tests { .wrap_err("Could not parse record store key")?, ); // get the distance to this record from our local key - let distance = self_address - .distance(&halfway_record_address) - .ilog2() - .unwrap_or(0); + let distance = self_address.distance(&halfway_record_address); // must be plus one bucket from the halfway record store.set_responsible_distance_range(distance); diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 8e3bc67364..64fd790ccd 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -10,7 +10,7 @@ use crate::record_store::{ClientRecordStore, NodeRecordStore}; use libp2p::kad::{ store::{RecordStore, Result}, - ProviderRecord, Record, RecordKey, + KBucketDistance, ProviderRecord, Record, RecordKey, }; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; @@ -130,17 +130,7 @@ impl UnifiedRecordStore { } } - pub(crate) fn get_farthest_replication_distance_bucket(&self) -> Option { - match self { - Self::Client(_store) => { - warn!("Calling get_distance_range at Client. This should not happen"); - None - } - Self::Node(store) => store.get_responsible_distance_range(), - } - } - - pub(crate) fn set_distance_range(&mut self, distance: u32) { + pub(crate) fn set_distance_range(&mut self, distance: KBucketDistance) { match self { Self::Client(_store) => { warn!("Calling set_distance_range at Client. This should not happen"); diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 1b90ac9a53..5e0d3a3ad4 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -8,7 +8,9 @@ #![allow(clippy::mutable_key_type)] use crate::target_arch::spawn; +use crate::CLOSE_GROUP_SIZE; use crate::{event::NetworkEvent, target_arch::Instant}; +use itertools::Itertools; use libp2p::{ kad::{KBucketDistance as Distance, RecordKey, K_VALUE}, PeerId, @@ -41,8 +43,8 @@ pub(crate) struct ReplicationFetcher { // Avoid fetching same chunk from different nodes AND carry out too many parallel tasks. on_going_fetches: HashMap<(RecordKey, RecordType), (PeerId, ReplicationTimeout)>, event_sender: mpsc::Sender, - /// ilog2 bucket distance range that the incoming key shall be fetched - distance_range: Option, + /// KBucketDistance range that the incoming key shall be fetched + distance_range: Option, /// Restrict fetch range to closer than this value /// used when the node is full, but we still have "close" data coming in /// that is _not_ closer than our farthest max record @@ -63,7 +65,7 @@ impl ReplicationFetcher { } /// Set the distance range. 
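     /// Incoming keys farther away than this are rejected as out of range, unless we know of
     /// no more than CLOSE_GROUP_SIZE peers closer to the data than ourselves.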
- pub(crate) fn set_replication_distance_range(&mut self, distance_range: u32) { + pub(crate) fn set_replication_distance_range(&mut self, distance_range: Distance) { self.distance_range = Some(distance_range); } @@ -76,6 +78,7 @@ impl ReplicationFetcher { holder: PeerId, incoming_keys: Vec<(NetworkAddress, RecordType)>, locally_stored_keys: &HashMap, + all_local_peers: &[PeerId], ) -> Vec<(PeerId, RecordKey)> { // remove locally stored from incoming_keys let mut new_incoming_keys: Vec<_> = incoming_keys @@ -133,12 +136,30 @@ impl ReplicationFetcher { .retain(|_, time_out| *time_out > Instant::now()); let mut out_of_range_keys = vec![]; + // Filter out those out_of_range ones among the incoming_keys. if let Some(ref distance_range) = self.distance_range { new_incoming_keys.retain(|(addr, _record_type)| { - let is_in_range = - self_address.distance(addr).ilog2().unwrap_or(0) <= *distance_range; + // find all closer peers to the data + let closer_peers_len = all_local_peers + .iter() + .filter(|peer_id| { + let peer_address = NetworkAddress::from_peer(**peer_id); + addr.distance(&peer_address) <= *distance_range + }) + .collect_vec() + .len(); + + // we consider ourselves in range if + // A) We don't know enough closer peers than ourselves + // or B) The distance to the data is within our GetRange + let is_in_range = closer_peers_len <= CLOSE_GROUP_SIZE + || self_address.distance(addr).ilog2() <= distance_range.ilog2(); if !is_in_range { + warn!( + "Rejecting incoming key: {addr:?} as out of range. {:?} is larger than {:?} ", + self_address.distance(addr).ilog2(), + distance_range.ilog2()); out_of_range_keys.push(addr.clone()); } is_in_range @@ -428,8 +449,12 @@ mod tests { incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &locally_stored_keys); + let keys_to_fetch = replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &locally_stored_keys, + &[], + ); assert_eq!(keys_to_fetch.len(), MAX_PARALLEL_FETCH); // we should not fetch anymore keys @@ -441,6 +466,7 @@ mod tests { PeerId::random(), vec![(key_1, RecordType::Chunk), (key_2, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(keys_to_fetch.is_empty()); @@ -451,6 +477,7 @@ mod tests { PeerId::random(), vec![(key, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(!keys_to_fetch.is_empty()); @@ -476,34 +503,41 @@ mod tests { let mut replication_fetcher = ReplicationFetcher::new(peer_id, event_sender); // Set distance range + // way to update this test let distance_target = NetworkAddress::from_peer(PeerId::random()); - let distance_range = self_address.distance(&distance_target).ilog2().unwrap_or(1); + let distance_range = self_address.distance(&distance_target); replication_fetcher.set_replication_distance_range(distance_range); + // generate a list of close peers + let close_peers = (0..100).map(|_| PeerId::random()).collect::>(); + let mut incoming_keys = Vec::new(); let mut in_range_keys = 0; (0..100).for_each(|_| { let random_data: Vec = (0..50).map(|_| rand::random::()).collect(); let key = NetworkAddress::from_record_key(&RecordKey::from(random_data)); - if key.distance(&self_address).ilog2().unwrap_or(0) <= distance_range { + if key.distance(&self_address).ilog2() <= distance_range.ilog2() { in_range_keys += 1; } incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &Default::default()); + let keys_to_fetch = 
replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &Default::default(), + &close_peers, + ); assert_eq!( keys_to_fetch.len(), replication_fetcher.on_going_fetches.len(), "keys to fetch and ongoing fetches should match" ); - assert_eq!( - in_range_keys, - keys_to_fetch.len() + replication_fetcher.to_be_fetched.len(), - "all keys should be in range and in the fetcher" + assert!( + keys_to_fetch.len() + replication_fetcher.to_be_fetched.len() >= in_range_keys, + "at least all keys in range should be in the fetcher" ); } } diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 76b6349ce1..40c6182f94 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -6,9 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - close_group_majority, driver::GetRecordCfg, GetRecordError, Network, NetworkError, Result, -}; +use crate::{driver::GetRecordCfg, Network, NetworkError, Result}; use libp2p::kad::{Quorum, Record}; use sn_protocol::{ storage::{try_deserialize_record, RecordHeader, RecordKind, RetryStrategy, SpendAddress}, @@ -39,7 +37,7 @@ impl Network { }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( - "Got record from the network, {:?}", + "Got raw spends from the network, {:?}", PrettyPrintRecordKey::from(&record.key) ); get_raw_signed_spends_from_record(&record) @@ -51,38 +49,14 @@ impl Network { /// If we get a quorum error, we increase the RetryStrategy pub async fn get_spend(&self, address: SpendAddress) -> Result { let key = NetworkAddress::from_spend_address(address).to_record_key(); - let mut get_cfg = GetRecordCfg { + let get_cfg = GetRecordCfg { get_quorum: Quorum::All, retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), is_register: false, }; - let record = match self.get_record_from_network(key.clone(), &get_cfg).await { - Ok(record) => record, - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { - record, - expected, - got, - })) => { - // if majority holds the spend, it might be worth to be trusted. - if got >= close_group_majority() { - debug!("At least a majority nodes hold the spend {address:?}, going to trust it if can fetch with majority again."); - get_cfg.get_quorum = Quorum::Majority; - get_cfg.retry_strategy = Some(RetryStrategy::Balanced); - self.get_record_from_network(key, &get_cfg).await? 
- } else { - return Err(NetworkError::GetRecordError( - GetRecordError::NotEnoughCopies { - record, - expected, - got, - }, - )); - } - } - Err(err) => return Err(err), - }; + let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( "Got record from the network, {:?}", PrettyPrintRecordKey::from(&record.key) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 3f3343f403..b0dd3f6857 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -552,7 +552,7 @@ impl Node { }; debug!( - "Got {} validated spends with key: {unique_pubkey:?} at {pretty_key:?}", + "Found {} spends with key: {unique_pubkey:?} at {pretty_key:?}", validated_spends.len() ); @@ -564,14 +564,12 @@ impl Node { expires: None, }; self.network().put_local_record(record); - debug!( - "Successfully stored validated spends with key: {unique_pubkey:?} at {pretty_key:?}" - ); + debug!("Successfully stored spends with key: {unique_pubkey:?} at {pretty_key:?}"); // Just log the double spend attempt. DoubleSpend error during PUT is not used and would just lead to // RecordRejected marker (which is incorrect, since we store double spends). if validated_spends.len() > 1 { - warn!("Got double spend(s) of len {} for the Spend PUT with unique_pubkey {unique_pubkey}", validated_spends.len()); + warn!("Got Burnt SpendAttempts of len {} for the Spend PUT with unique_pubkey {unique_pubkey} at {pretty_key:?}", validated_spends.len()); } self.record_metrics(Marker::ValidSpendRecordPutFromNetwork(&pretty_key)); @@ -756,13 +754,14 @@ impl Node { } spends } - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { + Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopiesInRange { record, got, + range, .. })) => { info!( - "Retrieved {got} copies of the record for {unique_pubkey:?} from the network" + "Retrieved {got} copies of the record for {unique_pubkey:?} from the network in range {range}" ); match get_raw_signed_spends_from_record(&record) { Ok(spends) => spends, diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 59e0cff078..80ec25b157 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -6,15 +6,18 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{error::Result, node::Node}; +use crate::{ + error::{Error, Result}, + node::Node, +}; use libp2p::{ kad::{Quorum, Record, RecordKey}, PeerId, }; -use sn_networking::{sort_peers_by_address, GetRecordCfg, Network, REPLICATION_PEERS_COUNT}; +use sn_networking::{GetRecordCfg, Network}; use sn_protocol::{ - messages::{Cmd, Query, QueryResponse, Request, Response}, - storage::RecordType, + messages::{Query, QueryResponse, Request, Response}, + storage::{try_serialize_record, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; use tokio::task::spawn; @@ -79,12 +82,27 @@ impl Node { // Hence value of the flag actually doesn't matter. 
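                // (registers are CRDTs whose split copies are merged on retrieval rather than treated as conflicts)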
is_register: false, }; - match node.network().get_record_from_network(key, &get_cfg).await { + match node + .network() + .get_record_from_network(key.clone(), &get_cfg) + .await + { Ok(record) => record, - Err(err) => { - error!("During replication fetch of {pretty_key:?}, failed in re-attempt of get from network {err:?}"); - return; - } + Err(error) => match error { + sn_networking::NetworkError::DoubleSpendAttempt(spends) => { + debug!("Failed to fetch record {pretty_key:?} from the network, double spend attempt {spends:?}"); + + let bytes = try_serialize_record(&spends, RecordKind::Spend)?; + + Record { + key, + value: bytes.to_vec(), + publisher: None, + expires: None, + } + } + other_error => return Err(other_error.into()), + }, } }; @@ -96,6 +114,7 @@ impl Node { } else { debug!("Completed storing Replication Record {pretty_key:?} from network."); } + Ok::<(), Error>(()) }); } Ok(()) @@ -111,86 +130,9 @@ impl Node { let network = self.network().clone(); let _handle = spawn(async move { - let start = std::time::Instant::now(); - let pretty_key = PrettyPrintRecordKey::from(&paid_key); - - // first we wait until our own network store can return the record - // otherwise it may not be fully written yet - let mut retry_count = 0; - debug!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); - loop { - let record = match network.get_local_record(&paid_key).await { - Ok(record) => record, - Err(err) => { - error!( - "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" - ); - None - } - }; - - if record.is_some() { - break; - } - - if retry_count > 10 { - error!( - "Could not get record from store for replication: {pretty_key:?} after 10 retries" - ); - return; - } - - retry_count += 1; - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - - debug!("Start replication of fresh record {pretty_key:?} from store"); - - // Already contains self_peer_id - let mut closest_k_peers = match network.get_closest_k_value_local_peers().await { - Ok(peers) => peers, - Err(err) => { - error!("Replicating fresh record {pretty_key:?} get_closest_local_peers errored: {err:?}"); - return; - } - }; - - // remove ourself from these calculations - closest_k_peers.retain(|peer_id| peer_id != &network.peer_id()); - - let data_addr = NetworkAddress::from_record_key(&paid_key); - - let sorted_based_on_addr = match sort_peers_by_address( - &closest_k_peers, - &data_addr, - REPLICATION_PEERS_COUNT, - ) { - Ok(result) => result, - Err(err) => { - error!( - "When replicating fresh record {pretty_key:?}, having error when sort {err:?}" - ); - return; - } - }; - - let our_peer_id = network.peer_id(); - let our_address = NetworkAddress::from_peer(our_peer_id); - let keys = vec![(data_addr.clone(), record_type.clone())]; - - for peer_id in sorted_based_on_addr { - debug!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); - let request = Request::Cmd(Cmd::Replicate { - holder: our_address.clone(), - keys: keys.clone(), - }); - - network.send_req_ignore_reply(request, *peer_id); - } - debug!( - "Completed replicate fresh record {pretty_key:?} on store, in {:?}", - start.elapsed() - ); + network + .replicate_valid_fresh_record(paid_key, record_type) + .await; }); } } diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 8d06a87187..21ba72d619 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -13,18 +13,19 @@ // use common::client::{get_client_and_funded_wallet, 
get_wallet}; // use eyre::{bail, Result}; // use itertools::Itertools; -// use sn_transfers::{ -// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, -// SpendReason, WalletError, GENESIS_CASHNOTE, -// }; // use sn_logging::LogBuilder; // use sn_networking::NetworkError; +// use sn_transfers::{ +// get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, +// WalletError, GENESIS_CASHNOTE, +// }; // use std::time::Duration; // use tracing::*; // #[tokio::test] // async fn cash_note_transfer_double_spend_fail() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let _log_guards = +// LogBuilder::init_single_threaded_tokio_test("cash_note_transfer_double_spend_fail", true); // // create 1 wallet add money from faucet // let first_wallet_dir = TempDir::new()?; @@ -40,7 +41,7 @@ // assert_eq!(third_wallet.balance(), NanoTokens::zero()); // // manually forge two transfers of the same source -// let amount = first_wallet_balance / 3; +// let amount = NanoTokens::from(first_wallet_balance / 3); // let to1 = first_wallet.address(); // let to2 = second_wallet.address(); // let to3 = third_wallet.address(); @@ -70,31 +71,50 @@ // )?; // // send both transfers to the network -// // upload won't error out, only error out during verification. + // info!("Sending both transfers to the network..."); -// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; -// assert!(res.is_ok()); -// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; -// assert!(res.is_ok()); +// // These may error (but may not depending on network speed) +// // so we're not going to rely on it here. +// let _ = client.send_spends(transfer_to_2.spends.iter(), true).await; -// // we wait 5s to ensure that the double spend attempt is detected and accumulated -// info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); -// tokio::time::sleep(Duration::from_secs(10)).await; +// let _ = client.send_spends(transfer_to_3.spends.iter(), true).await; + +// // check the CashNotes, it should fail +// info!("Verifying the transfers from first wallet..."); // let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// // check the CashNotes, it should fail -// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; -// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; -// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// let mut should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let mut should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; + +// for i in 0..5 { +// if should_err1.is_err() && should_err2.is_err() { +// break; +// } + +// tokio::time::sleep(Duration::from_secs(1)).await; +// info!("Retrying verification.{i}... for should_err1+2"); +// println!("Retrying verification{i} ... 
for should_err1+2"); +// should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// } + +// info!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); +// println!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); // assert!(should_err1.is_err() && should_err2.is_err()); -// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); -// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); + +// assert_eq!( +// format!("{should_err1:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err1:?}" +// ); + +// assert_eq!( +// format!("{should_err2:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err2:?}" +// ); // Ok(()) // } @@ -168,7 +188,7 @@ // )?; // // send the transfer to the network which should reject it -// let res = client.send_spends(transfer2.spends.iter(), false).await; +// let res = client.send_spends(transfer2.spends.iter(), true).await; // std::mem::drop(exclusive_access); // assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); @@ -184,8 +204,8 @@ // let wallet_dir_1 = TempDir::new()?; // let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; -// let balance_1 = wallet_1.balance(); -// let amount = balance_1 / 2; +// let balance_1 = wallet_1.balance().as_nano(); +// let amount = NanoTokens::from(balance_1 / 2); // let to1 = wallet_1.address(); // // Send from 1 -> 2 @@ -262,14 +282,18 @@ // reason.clone(), // wallet_1.key(), // )?; // reuse the old cash notes -// client -// .send_spends(transfer_to_3.spends.iter(), false) -// .await?; +// // ignore response in case it errors out early, we verify below +// let _res = client.send_spends(transfer_to_3.spends.iter(), true).await; // info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_3[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned + // info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); -// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_2[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned // // The old spend has been poisoned, but spends from 22 -> 222 should still work // let wallet_dir_222 = TempDir::new()?; @@ -300,16 +324,16 @@ // client.verify_cashnote(&cash_notes_for_222[0]).await?; // // finally assert that we have a double spend attempt error here -// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// // we wait to ensure that the double spend attempt is detected and accumulated // tokio::time::sleep(Duration::from_secs(5)).await; // match client.verify_cashnote(&cash_notes_for_2[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt was: {e:?}", // ); // } // } @@ -317,10 +341,10 @@ // match client.verify_cashnote(&cash_notes_for_3[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt was: {e:?}", // ); // } // } @@ -339,7 +363,7 @@ // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; // let balance_a = wallet_a.balance().as_nano(); -// let amount = balance_a / 2; +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -428,12 +452,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_secs(10)).await; - -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); // poisoned +// assert!( +// format!("{result:?}").starts_with("Err(UnexpectedParentSpends"), +// "Should have been UnexpectedParentSpends error, was: {result:?}" +// ); // // Try to double spend from B -> Y // let wallet_dir_y = TempDir::new()?; @@ -470,32 +492,48 @@ // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from B -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // info!("Verifying the original cashnote of A -> B"); + +// // arbitrary time sleep to allow for network accumulation of double spend. 
+// tokio::time::sleep(Duration::from_secs(1)).await; + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; // info!("Got result while verifying the original spend from A -> B: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); + +// println!("Verifying the original cashnote of B -> C"); -// info!("Verifying the original cashnote of B -> C"); // let result = client.verify_cashnote(&cash_notes_for_c[0]).await; // info!("Got result while verifying the original spend from B -> C: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // Ok(()) // } @@ -511,8 +549,8 @@ // let wallet_dir_a = TempDir::new()?; // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; -// let balance_a = wallet_a.balance(); -// let amount = balance_a / 2; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -574,7 +612,7 @@ // )?; // client -// .send_spends(transfer_to_c.spends.iter(), false) +// .send_spends(transfer_to_c.spends.iter(), true) // .await?; // info!("Verifying the transfers from B -> C wallet..."); @@ -611,9 +649,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client @@ -649,20 +688,23 @@ // reason.clone(), // wallet_a.key(), // )?; // reuse the old cash notes -// client -// 
.send_spends(transfer_to_y.spends.iter(), false) -// .await?; + +// // we actually don't care about the result here, we just want to spam the network with double spends +// let _ = client.send_spends(transfer_to_y.spends.iter(), false).await; + +// // and then we verify the double spend attempt // info!("Verifying the transfers from A -> Y wallet... It should error out."); // let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); // // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_millis(500)).await; +// tokio::time::sleep(Duration::from_millis(1500)).await; // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from A -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 23fe9c53b0..d36f680ca2 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -14,7 +14,6 @@ // use libp2p::PeerId; // use rand::Rng; // use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; -// use sn_evm::{Amount, AttoTokens, PaymentQuote}; // use sn_logging::LogBuilder; // use sn_networking::{GetRecordError, NetworkError}; // use sn_protocol::{ @@ -23,6 +22,7 @@ // NetworkAddress, // }; // use sn_registers::Permissions; +// use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; // use std::collections::BTreeMap; // use tokio::time::{sleep, Duration}; // use tracing::info; @@ -80,7 +80,7 @@ // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // let subset_len = chunks.len() / 3; -// let _storage_cost = wallet_client +// let res = wallet_client // .pay_for_storage( // chunks // .clone() @@ -88,7 +88,15 @@ // .take(subset_len) // .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), // ) -// .await?; +// .await; + +// // if the payment failed, we can log that +// if let Err(error) = res { +// tracing::warn!( +// "Payment failed, (though that doesn't really break this test): {:?}", +// error +// ); +// } // // now let's request to upload all addresses, even that we've already paid for a subset of them // let verify_store = false; @@ -111,7 +119,7 @@ // let paying_wallet_dir: TempDir = TempDir::new()?; // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let wallet_original_balance = paying_wallet.balance().as_atto(); +// let wallet_original_balance = paying_wallet.balance().as_nano(); // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // // generate a random number (between 50 and 100) of random addresses @@ -135,10 +143,10 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for the subset of addresses, 1 nano per addr -// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); +// let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); // info!("Verifying new balance on paying wallet is {new_balance} ..."); // let paying_wallet = 
wallet_client.into_wallet(); -// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm +// assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs for the subset have been cached in the wallet // assert!(random_content_addrs @@ -160,13 +168,12 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for addresses we haven't previously paid for, 1 nano per addr -// let new_balance = AttoTokens::from_atto( -// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), +// let new_balance = NanoTokens::from( +// wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), // ); // println!("Verifying new balance on paying wallet is now {new_balance} ..."); // let paying_wallet = wallet_client.into_wallet(); -// // TODO adapt to evm -// // assert_eq!(paying_wallet.balance(), new_balance); +// assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs now for all addresses have been cached in the wallet // // assert!(random_content_addrs @@ -229,18 +236,16 @@ // no_data_payments.insert( // *chunk_name, // ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// MainPubkey::new(bls::SecretKey::random().public_key()), +// PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), // PeerId::random().to_bytes(), // ), // ); // } -// // TODO adapt to evm -// // let _ = wallet_client -// // .mut_wallet() -// // .send_storage_payment(&no_data_payments) -// // .await?; +// let _ = wallet_client +// .mut_wallet() +// .local_send_storage_payment(&no_data_payments)?; // sleep(Duration::from_secs(5)).await; @@ -248,131 +253,131 @@ // .upload_test_bytes(content_bytes.clone(), false) // .await?; -// info!("Reading {content_addr:?} expected to fail"); -// let mut files_download = FilesDownload::new(files_api); -// assert!( -// matches!( -// files_download.download_file(content_addr, None).await, -// Err(ClientError::Network(NetworkError::GetRecordError( -// GetRecordError::RecordNotFound -// ))) -// ), -// "read bytes should fail as we didn't store them" -// ); +// // info!("Reading {content_addr:?} expected to fail"); +// // let mut files_download = FilesDownload::new(files_api); +// // assert!( +// // matches!( +// // files_download.download_file(content_addr, None).await, +// // Err(ClientError::Network(NetworkError::GetRecordError( +// // GetRecordError::RecordNotFound +// // ))) +// // ), +// // "read bytes should fail as we didn't store them" +// // ); -// Ok(()) -// } +// // Ok(()) +// // } -// #[tokio::test] -// async fn storage_payment_register_creation_succeeds() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // async fn storage_payment_register_creation_succeeds() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = 
XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_addr = NetworkAddress::from_register_address(address); -// info!("Paying for random Register address {net_addr:?} ..."); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_addr = NetworkAddress::from_register_address(address); +// // info!("Paying for random Register address {net_addr:?} ..."); -// let _cost = wallet_client -// .pay_for_storage(std::iter::once(net_addr)) -// .await?; +// // let _cost = wallet_client +// // .pay_for_storage(std::iter::once(net_addr)) +// // .await?; -// let (mut register, _cost, _royalties_fees) = client -// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) -// .await?; +// // let (mut register, _cost, _royalties_fees) = client +// // .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// // .await?; -// println!("Newly created register has {} ops", register.read().len()); +// // println!("Newly created register has {} ops", register.read().len()); -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(register.read(), retrieved_reg.read()); +// // assert_eq!(register.read(), retrieved_reg.read()); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; +// // register.write(&random_entry)?; -// println!( -// "Register has {} ops after first write", -// register.read().len() -// ); +// // println!( +// // "Register has {} ops after first write", +// // register.read().len() +// // ); -// register.sync(&mut wallet_client, true, None).await?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// assert_eq!(retrieved_reg.read().len(), 1); +// // assert_eq!(retrieved_reg.read().len(), 1); -// for index in 1..10 { -// println!("current index is {index}"); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // for index in 1..10 { +// // println!("current index is {index}"); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; -// register.sync(&mut wallet_client, true, None).await?; +// // register.write(&random_entry)?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// println!( -// "current retrieved register entry length is {}", -// retrieved_reg.read().len() -// ); -// println!("current expected entry length is {}", register.read().len()); +// // println!( +// // "current retrieved register entry length is {}", +// // retrieved_reg.read().len() +// // ); +// // println!("current expected entry length is {}", register.read().len()); -// println!( -// "current retrieved register ops length is {}", -// retrieved_reg.ops.len() -// ); -// println!("current local cached ops length is {}", register.ops.len()); +// // println!( +// // "current retrieved register ops length is {}", +// // retrieved_reg.ops.len() 
+// // ); +// // println!("current local cached ops length is {}", register.ops.len()); -// assert_eq!(retrieved_reg.read().len(), register.read().len()); +// // assert_eq!(retrieved_reg.read().len(), register.read().len()); -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// println!("Current fetched register is {:?}", retrieved_reg.register); -// println!( -// "Fetched register has update history of {}", -// retrieved_reg.register.log_update_history() -// ); +// // println!("Current fetched register is {:?}", retrieved_reg.register); +// // println!( +// // "Fetched register has update history of {}", +// // retrieved_reg.register.log_update_history() +// // ); -// std::thread::sleep(std::time::Duration::from_millis(1000)); -// } +// // std::thread::sleep(std::time::Duration::from_millis(1000)); +// // } -// Ok(()) -// } +// // Ok(()) +// // } -// #[tokio::test] -// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] -// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] +// // async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_address = -// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_address = +// // NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); -// let mut no_data_payments = BTreeMap::default(); -// no_data_payments.insert( -// net_address -// .as_xorname() -// .expect("RegisterAddress should convert to XorName"), -// ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), -// vec![], -// ), -// ); +// // let mut no_data_payments = BTreeMap::default(); +// // no_data_payments.insert( +// // net_address +// // .as_xorname() +// // .expect("RegisterAddress should convert to XorName"), +// // ( +// // sn_evm::utils::dummy_address(), +// // PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// // vec![], +// // ), +// // ); // println!( // "current retrieved register entry length is {}", @@ -395,16 +400,16 @@ // // .send_storage_payment(&no_data_payments) // // .await?; -// // this should fail to store as the amount paid is not enough -// let (mut register, _cost, _royalties_fees) = 
client
-//         .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default())
-//         .await?;
+// //     // this should fail to store as the amount paid is not enough
+// //     let (mut register, _cost, _royalties_fees) = client
+// //         .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default())
+// //         .await?;
-//     sleep(Duration::from_secs(5)).await;
-//     assert!(matches!(
-//         client.get_register(address).await,
-//         Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
-//     ));
+// //     sleep(Duration::from_secs(5)).await;
+// //     assert!(matches!(
+// //         client.get_register(address).await,
+// //         Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
+// //     ));
 //     println!("Current fetched register is {:?}", retrieved_reg.address());
 //     println!(
@@ -415,11 +420,11 @@
 //     let random_entry = rng.gen::<[u8; 32]>().to_vec();
 //     register.write(&random_entry)?;
-//     sleep(Duration::from_secs(5)).await;
-//     assert!(matches!(
-//         register.sync(&mut wallet_client, false, None).await,
-//         Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
-//     ));
+// //     sleep(Duration::from_secs(5)).await;
+// //     assert!(matches!(
+// //         register.sync(&mut wallet_client, false, None).await,
+// //         Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
+// //     ));
-//     Ok(())
-// }
+// //     Ok(())
+// // }
diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs
index 641756fa2c..8649d07909 100644
--- a/sn_node/tests/verify_data_location.rs
+++ b/sn_node/tests/verify_data_location.rs
@@ -16,13 +16,10 @@ use common::{
     get_all_peer_ids, get_safenode_rpc_client, NodeRestart,
 };
 use eyre::{eyre, Result};
-use libp2p::{
-    kad::{KBucketKey, RecordKey},
-    PeerId,
-};
+use libp2p::{kad::RecordKey, PeerId};
 use rand::{rngs::OsRng, Rng};
 use sn_logging::LogBuilder;
-use sn_networking::{sleep, sort_peers_by_key};
+use sn_networking::{sleep, sort_peers_by_address_and_limit, sort_peers_by_key_and_limit};
 use sn_protocol::{
     safenode_proto::{NodeInfoRequest, RecordAddressesRequest},
     NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
@@ -160,8 +157,8 @@ fn print_node_close_groups(all_peers: &[PeerId]) {
     for (node_index, peer) in all_peers.iter().enumerate() {
         let key = NetworkAddress::from_peer(*peer).as_kbucket_key();
-        let closest_peers =
-            sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer");
+        let closest_peers = sort_peers_by_key_and_limit(&all_peers, &key, CLOSE_GROUP_SIZE)
+            .expect("failed to sort peer");
         let closest_peers_idx = closest_peers
             .iter()
             .map(|&&peer| {
@@ -212,11 +209,12 @@ async fn verify_location(all_peers: &Vec<PeerId>, node_rpc_addresses: &[SocketAd
     for (key, actual_holders_idx) in record_holders.iter() {
         println!("Verifying {:?}", PrettyPrintRecordKey::from(key));
         info!("Verifying {:?}", PrettyPrintRecordKey::from(key));
-        let record_key = KBucketKey::from(key.to_vec());
-        let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)?
-            .into_iter()
-            .cloned()
-            .collect::<HashSet<_>>();
+        let record_address = NetworkAddress::from_record_key(key);
+        let expected_holders =
+            sort_peers_by_address_and_limit(all_peers, &record_address, CLOSE_GROUP_SIZE)?
+                .into_iter()
+                .cloned()
+                .collect::<HashSet<_>>();
         let actual_holders = actual_holders_idx
             .iter()
diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs
index da19270b69..85dc2e3a09 100644
--- a/sn_node/tests/verify_routing_table.rs
+++ b/sn_node/tests/verify_routing_table.rs
@@ -26,7 +26,7 @@ use tracing::{error, info, trace};
 /// Sleep for some time for the nodes to discover each other before verification
 /// Also can be set through the env variable of the same name.
-const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);
+const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(60);
 #[tokio::test(flavor = "multi_thread")]
 async fn verify_routing_table() -> Result<()> {
diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs
index f73c356b53..8462ff85f3 100644
--- a/sn_protocol/src/error.rs
+++ b/sn_protocol/src/error.rs
@@ -78,4 +78,7 @@ pub enum Error {
     // The record already exists at this node
     #[error("The record already exists, so do not charge for it: {0:?}")]
     RecordExists(PrettyPrintRecordKey<'static>),
+
+    #[error("Record header is incorrect")]
+    IncorrectRecordHeader,
 }
diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs
index 2935e43fce..3a6b4ba6a8 100644
--- a/sn_protocol/src/storage.rs
+++ b/sn_protocol/src/storage.rs
@@ -18,7 +18,10 @@ use std::{str::FromStr, time::Duration};
 pub use self::{
     address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress},
     chunks::Chunk,
-    header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType},
+    header::{
+        get_type_from_record, try_deserialize_record, try_serialize_record, RecordHeader,
+        RecordKind, RecordType,
+    },
     scratchpad::Scratchpad,
 };
diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs
index 96a4515526..af43c21256 100644
--- a/sn_protocol/src/storage/header.rs
+++ b/sn_protocol/src/storage/header.rs
@@ -84,6 +84,33 @@ impl Display for RecordKind {
     }
 }
+/// Return the RecordType
+pub fn get_type_from_record(record: &Record) -> Result<RecordType> {
+    let key = record.key.clone();
+    let record_key = PrettyPrintRecordKey::from(&key);
+
+    match RecordHeader::from_record(record) {
+        Ok(record_header) => match record_header.kind {
+            RecordKind::Chunk => Ok(RecordType::Chunk),
+            RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
+            RecordKind::Spend | RecordKind::Register => {
+                let content_hash = XorName::from_content(&record.value);
+                Ok(RecordType::NonChunk(content_hash))
+            }
+            RecordKind::ChunkWithPayment
+            | RecordKind::RegisterWithPayment
+            | RecordKind::ScratchpadWithPayment => {
+                error!("Record {record_key:?} with payment shall not be stored locally.");
+                Err(Error::IncorrectRecordHeader)
+            }
+        },
+        Err(err) => {
+            error!("For record {record_key:?}, failed to parse record_header {err:?}");
+            Err(Error::IncorrectRecordHeader)
+        }
+    }
+}
+
 impl RecordHeader {
     pub const SIZE: usize = 2;
diff --git a/sn_transfers/src/wallet/error.rs b/sn_transfers/src/wallet/error.rs
index 5a57b7434a..f60b718f42 100644
--- a/sn_transfers/src/wallet/error.rs
+++ b/sn_transfers/src/wallet/error.rs
@@ -40,9 +40,19 @@ pub enum Error {
     /// A general error when receiving a transfer fails
     #[error("Failed to receive transfer due to {0}")]
     CouldNotReceiveMoney(String),
+    /// A spend has been burnt (i.e. there was a DoubleSpendAttempt)
+    #[error("Failed to verify transfer validity in the network, a burnt SpendAttempt was found")]
+    BurntSpend,
+    /// Parents of a spend were not as expected in a provided cash note
#[error("Failed to verify transfer's parents in the network, transfer could be invalid or a parent double spent")]
+    UnexpectedParentSpends(crate::SpendAddress),
+    /// No valid unspent cashnotes found
+    #[error("All the redeemed CashNotes are already spent")]
+    AllRedeemedCashnotesSpent,
     /// A general error when verifying a transfer validity in the network
     #[error("Failed to verify transfer validity in the network {0}")]
     CouldNotVerifyTransfer(String),
+    /// Failed to fetch spend from network
     #[error("Failed to fetch spend from network: {0}")]
     FailedToGetSpend(String),

From 9d55e30463f689f39045f397dad76ffcd191c23f Mon Sep 17 00:00:00 2001
From: qima
Date: Wed, 23 Oct 2024 18:30:10 +0800
Subject: [PATCH 053/128] Revert "feat(autonomi): download either a file or
 directory based on data"

This reverts commit 04ec14e1718193bcc76b5c07b902161b5210d4a6.
---
 autonomi-cli/src/actions/download.rs     | 52 +++++++++++++++++++++++
 autonomi-cli/src/actions/mod.rs          |  4 ++
 autonomi-cli/src/actions/progress_bar.rs |  1 -
 autonomi-cli/src/commands.rs             |  3 +-
 autonomi-cli/src/commands/file.rs        | 40 +++++------------
 autonomi/src/client/archive.rs           |  4 +-
 autonomi/src/client/fs.rs                | 54 +++++------------------
 autonomi/src/client/mod.rs               |  1 -
 autonomi/tests/fs.rs                     |  9 ++--
 9 files changed, 84 insertions(+), 84 deletions(-)
 create mode 100644 autonomi-cli/src/actions/download.rs

diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs
new file mode 100644
index 0000000000..ba004930e3
--- /dev/null
+++ b/autonomi-cli/src/actions/download.rs
@@ -0,0 +1,52 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use super::get_progress_bar;
+use autonomi::{client::address::str_to_addr, Client};
+use color_eyre::eyre::{eyre, Context, Result};
+use std::path::PathBuf;
+
+pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> {
+    let address = str_to_addr(addr).wrap_err("Failed to parse data address")?;
+    let archive = client
+        .archive_get(address)
+        .await
+        .wrap_err("Failed to fetch data from address")?;
+
+    let progress_bar = get_progress_bar(archive.map.len() as u64)?;
+    let mut all_errs = vec![];
+    for (path, addr) in archive.map {
+        progress_bar.println(format!("Fetching file: {path:?}..."));
+        let bytes = match client.data_get(addr).await {
+            Ok(bytes) => bytes,
+            Err(e) => {
+                let err = format!("Failed to fetch file {path:?}: {e}");
+                all_errs.push(err);
+                continue;
+            }
+        };
+
+        let path = PathBuf::from(dest_path).join(path);
+        let here = PathBuf::from(".");
+        let parent = path.parent().unwrap_or_else(|| &here);
+        std::fs::create_dir_all(parent)?;
+        std::fs::write(path, bytes)?;
+        progress_bar.clone().inc(1);
+    }
+    progress_bar.finish_and_clear();
+
+    if all_errs.is_empty() {
+        println!("Successfully downloaded data at: {addr}");
+        Ok(())
+    } else {
+        let err_no = all_errs.len();
+        eprintln!("{err_no} errors while downloading data at: {addr}");
+        eprintln!("{all_errs:#?}");
+        Err(eyre!("Errors while downloading data"))
+    }
+}
diff --git a/autonomi-cli/src/actions/mod.rs b/autonomi-cli/src/actions/mod.rs
index 98ef491064..8b4662c3d9 100644
--- a/autonomi-cli/src/actions/mod.rs
+++ b/autonomi-cli/src/actions/mod.rs
@@ -7,6 +7,10 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 mod connect;
+mod download;
 mod progress_bar;
 pub use connect::connect_to_network;
+pub use download::download;
+
+pub use progress_bar::get_progress_bar;
diff --git a/autonomi-cli/src/actions/progress_bar.rs b/autonomi-cli/src/actions/progress_bar.rs
index 5e2c6c914e..2fcfe0ba20 100644
--- a/autonomi-cli/src/actions/progress_bar.rs
+++ b/autonomi-cli/src/actions/progress_bar.rs
@@ -10,7 +10,6 @@ use color_eyre::eyre::Result;
 use indicatif::{ProgressBar, ProgressStyle};
 use std::time::Duration;
-#[allow(dead_code)]
 pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
     let progress_bar = ProgressBar::new(length);
     progress_bar.set_style(
diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs
index 4c2067aa87..bb718df43a 100644
--- a/autonomi-cli/src/commands.rs
+++ b/autonomi-cli/src/commands.rs
@@ -9,7 +9,6 @@ mod file;
 mod register;
 mod vault;
-use std::path::PathBuf;
 use clap::Subcommand;
 use color_eyre::Result;
@@ -56,7 +55,7 @@ pub enum FileCmd {
         /// The address of the file to download.
         addr: String,
         /// The destination file path.
-        dest_file: PathBuf,
+        dest_file: String,
     },
     /// List previous uploads
diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs
index bfa4719460..d99a848214 100644
--- a/autonomi-cli/src/commands/file.rs
+++ b/autonomi-cli/src/commands/file.rs
@@ -8,11 +8,9 @@
 use crate::utils::collect_upload_summary;
 use autonomi::client::address::addr_to_str;
-use autonomi::client::address::str_to_addr;
 use autonomi::Multiaddr;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
-use std::path::Path;
 use std::path::PathBuf;
 pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
@@ -28,35 +26,22 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     println!("Total cost: {cost}");
     Ok(())
 }
-pub async fn upload(path: &str, peers: Vec<Multiaddr>) -> Result<()> {
+pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let wallet = crate::keys::load_evm_wallet()?;
     let mut client = crate::actions::connect_to_network(peers).await?;
     let event_receiver = client.enable_client_events();
     let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver);
-    let path = PathBuf::from(path);
-
-    let xor_name = if path.is_dir() {
-        println!("Uploading directory: {path:?}");
-        info!("Uploading directory: {path:?}");
-        client
-            .dir_upload(&path, &wallet)
-            .await
-            .wrap_err("Failed to upload directory")?
-    } else {
-        println!("Uploading file: {path:?}");
-        info!("Uploading file: {path:?}");
-        client
-            .file_upload(&path, &wallet)
-            .await
-            .wrap_err("Failed to upload file")?
-    };
+    println!("Uploading data to network...");
+    let xor_name = client
+        .dir_upload(PathBuf::from(file), &wallet)
+        .await
+        .wrap_err("Failed to upload file")?;
     let addr = addr_to_str(xor_name);
-    println!("Successfully uploaded: {path:?}");
+    println!("Successfully uploaded: {file}");
     println!("At address: {addr}");
-    info!("Successfully uploaded: {path:?} at address: {addr}");
     if let Ok(()) = upload_completed_tx.send(()) {
         let summary = upload_summary_thread.await?;
         if summary.record_count == 0 {
@@ -65,18 +50,13 @@ pub async fn upload(path: &str, peers: Vec<Multiaddr>) -> Result<()> {
             println!("Number of chunks uploaded: {}", summary.record_count);
             println!("Total cost: {} AttoTokens", summary.tokens_spent);
         }
-        info!("Summary for upload of data {path:?} at {addr:?}: {summary:?}");
     }
     Ok(())
 }
-pub async fn download(addr: &str, dest_path: &Path, peers: Vec<Multiaddr>) -> Result<()> {
-    let client = crate::actions::connect_to_network(peers).await?;
-    let address = str_to_addr(addr).wrap_err("Failed to parse data address")?;
-
-    client.download_file_or_dir(address, dest_path).await?;
-
-    Ok(())
+pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let mut client = crate::actions::connect_to_network(peers).await?;
+    crate::actions::download(addr, dest_path, &mut client).await
 }
 pub fn list(_peers: Vec<Multiaddr>) -> Result<()> {
     println!("The file list feature is coming soon!");
diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
index f38ca24cbc..d3cf9714ec 100644
--- a/autonomi/src/client/archive.rs
+++ b/autonomi/src/client/archive.rs
@@ -30,7 +30,7 @@ pub struct Archive {
 impl Archive {
     /// Deserialize from bytes.
-    pub fn from_bytes(data: &Bytes) -> Result<Archive, rmp_serde::decode::Error> {
+    pub fn from_bytes(data: Bytes) -> Result<Archive, rmp_serde::decode::Error> {
         let root: Archive = rmp_serde::from_slice(&data[..])?;
         Ok(root)
@@ -49,7 +49,7 @@ impl Client {
     /// Fetch an archive from the network
     pub async fn archive_get(&self, addr: ArchiveAddr) -> Result<Archive, GetError> {
         let data = self.data_get(addr).await?;
-        Ok(Archive::from_bytes(&data)?)
+        Ok(Archive::from_bytes(data)?)
}
     /// Upload an archive to the network
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 674e03fc2b..8fff06324c 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -10,7 +10,7 @@ use crate::client::Client;
 use bytes::Bytes;
 use sn_evm::EvmWallet;
 use std::collections::HashMap;
-use std::path::Path;
+use std::path::PathBuf;
 use super::archive::{Archive, ArchiveAddr};
 use super::data::{DataAddr, GetError, PutError};
@@ -48,7 +48,7 @@ impl Client {
     pub async fn file_download(
         &self,
         data_addr: DataAddr,
-        to_dest: &Path,
+        to_dest: PathBuf,
     ) -> Result<(), DownloadError> {
         let data = self.data_get(data_addr).await?;
         if let Some(parent) = to_dest.parent() {
@@ -62,52 +62,20 @@ impl Client {
     pub async fn dir_download(
         &self,
         archive_addr: ArchiveAddr,
-        to_dest: &Path,
+        to_dest: PathBuf,
     ) -> Result<(), DownloadError> {
         let archive = self.archive_get(archive_addr).await?;
         for (path, addr) in archive.map {
-            self.file_download(addr, &to_dest.join(path)).await?;
+            self.file_download(addr, to_dest.join(path)).await?;
         }
         Ok(())
     }
-    /// Download either a file or a directory depending on the data present at the provided address.
-    pub async fn download_file_or_dir(
-        &self,
-        address: DataAddr,
-        to_dest: &Path,
-    ) -> Result<(), DownloadError> {
-        let data = self.data_get(address).await?;
-
-        if let Ok(archive) = Archive::from_bytes(&data) {
-            info!("Got an Archive from bytes, unpacking directory to {to_dest:?}");
-            for (path, addr) in archive.map {
-                let dest = to_dest.join(path);
-
-                #[cfg(feature = "loud")]
-                println!("Downloading file: {addr:?} to {dest:?}");
-
-                debug!("Downloading archived file: {addr:?} to {dest:?}");
-                self.file_download(addr, &dest).await?;
-            }
-        } else {
-            info!("The downloaded data is not an Archive, saving it as a file.");
-            #[cfg(feature = "loud")]
-            println!("Downloading file: {address:?} to {to_dest:?}");
-            if let Some(parent) = to_dest.parent() {
-                tokio::fs::create_dir_all(parent).await?;
-            }
-            tokio::fs::write(to_dest, data).await?;
-        }
-
-        Ok(())
-    }
-
     /// Upload a directory to the network. The directory is recursively walked.
     /// Reads all files, splits into chunks, uploads chunks, uploads datamaps, uploads archive, returns ArchiveAddr (pointing to the archive)
     pub async fn dir_upload(
         &self,
-        dir_path: &Path,
+        dir_path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<ArchiveAddr, UploadError> {
         let mut map = HashMap::new();
@@ -119,13 +87,13 @@ impl Client {
                 continue;
             }
-            let path = entry.path();
+            let path = entry.path().to_path_buf();
             tracing::info!("Uploading file: {path:?}");
             #[cfg(feature = "loud")]
             println!("Uploading file: {path:?}");
-            let file = self.file_upload(path, wallet).await?;
+            let file = self.file_upload(path.clone(), wallet).await?;
-            map.insert(path.to_path_buf(), file);
+            map.insert(path, file);
         }
         let archive = Archive { map };
@@ -138,9 +106,9 @@ impl Client {
     /// Upload a file to the network.
     /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap)
-    pub async fn file_upload(
+    async fn file_upload(
         &self,
-        path: &Path,
+        path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<DataAddr, UploadError> {
         let data = tokio::fs::read(path).await?;
     /// Get the cost to upload a file/dir to the network.
    /// quick and dirty implementation, please refactor once files are cleanly implemented
-    pub async fn file_cost(&self, path: &Path) -> Result<AttoTokens, UploadError> {
+    pub async fn file_cost(&self, path: &PathBuf) -> Result<AttoTokens, UploadError> {
         let mut map = HashMap::new();
         let mut total_cost = sn_evm::Amount::ZERO;
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index 68dfe0d50a..f19216fe84 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -202,7 +202,6 @@ pub enum ClientEvent {
 }
 /// Summary of an upload operation.
-#[derive(Debug, Clone)]
 pub struct UploadSummary {
     pub record_count: usize,
     pub tokens_spent: Amount,
diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs
index 4c286725aa..5b1fce533b 100644
--- a/autonomi/tests/fs.rs
+++ b/autonomi/tests/fs.rs
@@ -14,7 +14,6 @@ use sha2::{Digest, Sha256};
 use sn_logging::LogBuilder;
 use std::fs::File;
 use std::io::{BufReader, Read};
-use std::path::PathBuf;
 use std::time::Duration;
 use test_utils::{evm::get_funded_wallet, peers_from_env};
 use tokio::time::sleep;
@@ -31,13 +30,13 @@ async fn dir_upload_download() -> Result<()> {
     let wallet = get_funded_wallet();
     let addr = client
-        .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet)
+        .dir_upload("tests/file/test_dir".into(), &wallet)
         .await?;
     sleep(Duration::from_secs(10)).await;
     client
-        .dir_download(addr, &PathBuf::from("tests/file/test_dir_fetched"))
+        .dir_download(addr, "tests/file/test_dir_fetched".into())
         .await?;
     // compare the two directories
@@ -87,7 +86,7 @@ async fn file_into_vault() -> Result<()> {
     let client_sk = bls::SecretKey::random();
     let addr = client
-        .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet)
+        .dir_upload("tests/file/test_dir".into(), &wallet)
         .await?;
     sleep(Duration::from_secs(2)).await;
@@ -100,7 +99,7 @@ async fn file_into_vault() -> Result<()> {
     let new_client = Client::connect(&[]).await?;
     if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? {
-        let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(&ap)?;
+        let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?;
         assert_eq!(
             archive.map, ap_archive_fetched.map,
From 7341105f397668a60f3a76b8005e492521f5684c Mon Sep 17 00:00:00 2001
From: qima
Date: Wed, 23 Oct 2024 18:30:39 +0800
Subject: [PATCH 054/128] Revert "Revert "Merge pull request #2243 from
 maqi/record_store_dir_prefixed_with_network_keys""

This reverts commit 27362f3f71e32b5a942da0b81ca8ffa9ff380891.
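A minimal sketch of the directory naming this revert restores, assuming only a `root_dir` path and the short key string produced by `get_key_version_str()`; the free-standing helper below is illustrative, not part of the patch:

    use std::path::{Path, PathBuf};

    // Suffix the record store dir with the network key version string so a
    // node pointed at a different network starts from an empty store instead
    // of serving records kept from the old one.
    fn record_store_dir(root_dir: &Path, key_version: &str) -> PathBuf {
        root_dir.join(format!("record_store_{key_version}"))
    }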
---
 sn_networking/src/driver.rs | 7 ++++---
 sn_protocol/src/version.rs  | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index f432d231fc..a895655650 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -53,8 +53,8 @@ use sn_protocol::{
     messages::{ChunkProof, Nonce, Request, Response},
     storage::{try_deserialize_record, RetryStrategy},
     version::{
-        IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR,
-        REQ_RESPONSE_VERSION_STR,
+        get_key_version_str, IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR,
+        IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR,
     },
     NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey,
 };
@@ -360,7 +360,8 @@ impl NetworkBuilder {
         let store_cfg = {
             // Configures the disk_store to store records under the provided path and increase the max record size
-            let storage_dir_path = root_dir.join("record_store");
+            // The storage dir is suffixed with the key_version str to avoid bringing records from an old network into a new one
+            let storage_dir_path = root_dir.join(format!("record_store_{}", get_key_version_str()));
             if let Err(error) = std::fs::create_dir_all(&storage_dir_path) {
                 return Err(NetworkError::FailedToCreateRecordStoreDir {
                     path: storage_dir_path,
diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs
index 04921730ef..e1c952976c 100644
--- a/sn_protocol/src/version.rs
+++ b/sn_protocol/src/version.rs
@@ -58,7 +58,7 @@ fn get_truncate_version_str() -> String {
 /// Get the PKs version string.
 /// If the public key is mis-configured via env variable,
 /// it shall result in being rejected to join by the network
-fn get_key_version_str() -> String {
+pub fn get_key_version_str() -> String {
     let mut f_k_str = FOUNDATION_PK.to_hex();
     let _ = f_k_str.split_off(6);
     let mut g_k_str = GENESIS_PK.to_hex();
From e74961b1831a5b167a56fea9f74d210823b375ed Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Wed, 23 Oct 2024 15:09:16 +0200
Subject: [PATCH 055/128] feat(autonomi): add `uploaded` metadata

---
 autonomi/src/client/archive.rs | 7 ++++++-
 autonomi/src/client/fs.rs      | 10 +++++++++-
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
index aa5301cfeb..3957b3d942 100644
--- a/autonomi/src/client/archive.rs
+++ b/autonomi/src/client/archive.rs
@@ -40,10 +40,14 @@ pub struct Archive {
     map: HashMap<PathBuf, (DataAddr, Metadata)>,
 }
-/// Metadata for a file in an archive
+/// Metadata for a file in an archive. Time values are UNIX timestamps.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 pub struct Metadata {
+    /// When the file was (last) uploaded to the network.
+    pub uploaded: u64,
+    /// File creation time on local file system. See [`std::fs::Metadata::created`] for details per OS.
     pub created: u64,
+    /// Last file modification time taken from local file system. See [`std::fs::Metadata::modified`] for details per OS.
pub modified: u64, } @@ -55,6 +59,7 @@ impl Metadata { .unwrap_or(Duration::from_secs(0)) .as_secs(); Self { + uploaded: now, created: now, modified: now, } diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index ba62d355fe..51311e2f70 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -188,6 +188,7 @@ fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata { entry.path().display() ); return Metadata { + uploaded: 0, created: 0, modified: 0, }; @@ -215,5 +216,12 @@ fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata { let created = unix_time("created", fs_metadata.created()); let modified = unix_time("modified", fs_metadata.modified()); - Metadata { created, modified } + Metadata { + uploaded: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(), + created, + modified, + } } From 8c8b7da3bd549f8de627ab0d375c228627af9ad1 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 23 Oct 2024 23:04:11 +0800 Subject: [PATCH 056/128] chore(autonomi): resolve wasm build error due to merge --- autonomi/src/client/wasm.rs | 5 ++--- sn_networking/src/lib.rs | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 03084f59dc..bbbb3461db 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -136,7 +136,6 @@ mod archive { #[cfg(feature = "vault")] mod vault { use super::*; - use bls::SecretKey; #[wasm_bindgen(js_name = UserData)] pub struct JsUserData(UserData); @@ -306,7 +305,7 @@ pub struct JsWallet(evmlib::wallet::Wallet); /// Get a funded wallet for testing. This either uses a default private key or the `EVM_PRIVATE_KEY` /// environment variable that was used during the build process of this library. #[wasm_bindgen(js_name = getFundedWallet)] -pub fn funded_wallet() -> Wallet { +pub fn funded_wallet() -> JsWallet { let network = evmlib::utils::get_evm_network_from_env() .expect("Failed to get EVM network from environment variables"); if matches!(network, evmlib::Network::ArbitrumOne) { @@ -321,7 +320,7 @@ pub fn funded_wallet() -> Wallet { let wallet = evmlib::wallet::Wallet::new_from_private_key(network, &private_key) .expect("Invalid private key"); - Wallet(wallet) + JsWallet(wallet) } /// Enable tracing logging in the console. diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index e47f593838..a275567c05 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -761,6 +761,7 @@ impl Network { /// Handle the split record error. /// Spend: Accumulate spends and return error if more than one. /// Register: Merge registers and return the merged record. 
+    #[cfg(not(target_arch = "wasm32"))]
     fn handle_split_record_error(
         result_map: &HashMap<XorName, (Record, HashSet<PeerId>)>,
         key: &RecordKey,
From 7b60628717f63542e0a73c9f81ee2018bb686a19 Mon Sep 17 00:00:00 2001
From: qima
Date: Wed, 23 Oct 2024 21:37:31 +0800
Subject: [PATCH 057/128] chore(CI): re-enable benchmark test

---
 .github/workflows/benchmark-prs.yml | 748 ++++++++++++++--------------
 1 file changed, 368 insertions(+), 380 deletions(-)

diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml
index 8c6dab409e..13da75ef2d 100644
--- a/.github/workflows/benchmark-prs.yml
+++ b/.github/workflows/benchmark-prs.yml
@@ -9,386 +9,374 @@ env:
   NODE_DATA_PATH: /home/runner/.local/share/safe/node
 jobs:
-  # benchmark-cli:
-  #   name: Compare sn_cli benchmarks to main
-  #   # right now only ubuntu, running on multiple systems would require many pushes...\
-  #   # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing
-  #   # once to the branch..
-  #   runs-on: ubuntu-latest
-  #   steps:
-  #     - uses: actions/checkout@v4
-  #     - uses: dtolnay/rust-toolchain@stable
-  #       with:
-  #         components: rustfmt, clippy
-  #     - uses: Swatinem/rust-cache@v2
-  #       continue-on-error: true
-  #     ########################
-  #     ### Setup ###
-  #     ########################
-  #     - run: cargo install cargo-criterion
-  #     - name: install ripgrep
-  #       run: sudo apt-get -y install ripgrep
-  #     - name: Download 95mb file to be uploaded with the safe client
-  #       shell: bash
-  #       run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
-  #     # As normal user won't care much about initial client startup,
-  #     # but be more alerted on communication speed during transmission.
-  #     # Meanwhile the criterion testing code includes the client startup as well,
-  #     # it will be better to execute bench test with `local`,
-  #     # to make the measurement results reflect speed improvement or regression more accurately.
- # - name: Build sn bins - # run: cargo build --release --bin safe --bin safenode --features local - # timeout-minutes: 30 - - # - name: Build faucet bin - # run: cargo build --release --bin faucet --features local --features gifting --no-default-features - # timeout-minutes: 30 - - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # env: - # SN_LOG: "all" - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ubuntu-latest - # build: true - - # - name: Check SAFE_PEERS was set - # shell: bash - # run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - - # ######################### - # ### Upload large file ### - # ######################### - - # - name: Fund cli wallet - # shell: bash - # run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000 - # env: - # SN_LOG: "all" - - # - name: Start a client instance to compare memory usage - # shell: bash - # run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick - # env: - # SN_LOG: "all" - - # - name: Cleanup uploaded_files folder to avoid pollute download benchmark - # shell: bash - # run: rm -rf $CLIENT_DATA_PATH/uploaded_files - - # ########################### - # ### Client Mem Analysis ### - # ########################### - - # - name: Check client memory usage - # shell: bash - # run: | - # client_peak_mem_limit_mb="1024" # mb - # client_avg_mem_limit_mb="512" # mb - - # peak_mem_usage=$( - # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/{print $2}' | - # sort -n | - # tail -n 1 - # ) - # echo "Peak memory usage: $peak_mem_usage MB" - # if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then - # echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" - # exit 1 - # fi - - # total_mem=$( - # rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' - # ) - # num_of_times=$( - # rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "num_of_times: $num_of_times" - # echo "Total memory is: $total_mem" - # average_mem=$(($total_mem/$(($num_of_times)))) - # echo "Average memory is: $average_mem" - - # if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then - # echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" - # exit 1 - # fi - # # Write the client memory usage to a file - # echo '[ - # { - # "name": "client-peak-memory-usage-during-upload", - # "value": '$peak_mem_usage', - # "unit": "MB" - # }, - # { - # "name": "client-average-memory-usage-during-upload", - # "value": '$average_mem', - # "unit": "MB" - # } - # ]' > client_memory_usage.json - - # - name: check client_memory_usage.json - # shell: bash - # run: cat client_memory_usage.json - - # - name: Alert for client memory usage - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # name: "Memory Usage of Client during uploading large file" - # tool: "customSmallerIsBetter" - # output-file-path: client_memory_usage.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/client-mem-usage.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub 
API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # ######################## - # ### Benchmark ### - # ######################## - # - name: Bench `safe` cli - # shell: bash - # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, - # # passes to tee which displays it in the terminal and writes to output.txt - # run: | - # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt - # cat output.txt | rg benchmark-complete | jq -s 'map({ - # name: (.id | split("/"))[-1], - # unit: "MiB/s", - # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) - # })' > files-benchmark.json - # timeout-minutes: 15 - - # - name: Confirming the number of files uploaded and downloaded during the benchmark test - # shell: bash - # run: | - # ls -l $CLIENT_DATA_PATH - # ls -l $CLIENT_DATA_PATH/uploaded_files - # ls -l $CLIENT_DATA_PATH/safe_files - - # - name: Store benchmark result - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # # What benchmark tool the output.txt came from - # tool: "customBiggerIsBetter" - # output-file-path: files-benchmark.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/benchmark-data.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # - name: Start a client to carry out download to output the logs - # shell: bash - # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - - # - name: Start a client to simulate criterion upload - # shell: bash - # run: | - # ls -l target/release - # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick - - # ######################### - # ### Stop Network ### - # ######################### - - # - name: Stop the local network - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_benchmark - # platform: ubuntu-latest - # build: true - - # - name: Upload Faucet folder - # uses: actions/upload-artifact@main - # with: - # name: faucet_folder - # path: /home/runner/.local/share/safe/test_faucet - # continue-on-error: true - # if: always() - - # ######################### - # ### Node Mem Analysis ### - # ######################### - - # # The large file uploaded will increase node's peak mem usage a lot - # - name: Check node memory usage - # shell: bash - # run: | - # node_peak_mem_limit_mb="250" # mb - # peak_mem_usage=$( - # rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | - # awk -F':' '/"memory_used_mb":/{print $2}' | - # sort -n | - # tail -n 1 - # ) - - # echo "Memory usage: $peak_mem_usage MB" - # if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then - # echo "Node memory usage exceeded threshold: $peak_mem_usage MB" - # exit 1 - # fi - # # Write the node 
memory usage to a file - # echo '[ - # { - # "name": "node-memory-usage-through-safe-benchmark", - # "value": '$peak_mem_usage', - # "unit": "MB" - # } - # ]' > node_memory_usage.json - - # - name: check node_memory_usage.json - # shell: bash - # run: cat node_memory_usage.json - - # - name: Alert for node memory usage - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # tool: "customSmallerIsBetter" - # output-file-path: node_memory_usage.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/node-mem-usage.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # Comment on the PR - # comment-always: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true - - # ########################################### - # ### Swarm_driver handling time Analysis ### - # ########################################### - - # - name: Check swarm_driver handling time - # shell: bash - # run: | - # num_of_times=$( - # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "Number of long cmd handling times: $num_of_times" - # total_long_handling_ms=$( - # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - # ) - # echo "Total cmd long handling time is: $total_long_handling_ms ms" - # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - # echo "Average cmd long handling time is: $average_handling_ms ms" - # total_long_handling=$(($total_long_handling_ms)) - # total_num_of_times=$(($num_of_times)) - # num_of_times=$( - # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - # rg "(\d+) matches" | - # rg "\d+" -o - # ) - # echo "Number of long event handling times: $num_of_times" - # total_long_handling_ms=$( - # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - # ) - # echo "Total event long handling time is: $total_long_handling_ms ms" - # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - # echo "Average event long handling time is: $average_handling_ms ms" - # total_long_handling=$(($total_long_handling_ms+$total_long_handling)) - # total_num_of_times=$(($num_of_times+$total_num_of_times)) - # average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) - # echo "Total swarm_driver long handling times is: $total_num_of_times" - # echo "Total swarm_driver long handling duration is: $total_long_handling ms" - # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - # total_num_of_times_limit_hits="30000" # hits - # total_long_handling_limit_ms="400000" # ms - # average_handling_limit_ms="20" # ms - # if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then - # echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" - # exit 1 - # fi - # if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then - # echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" 
- # exit 1 - # fi - # if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then - # echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" - # exit 1 - # fi - - # # Write the node memory usage to a file - # echo '[ - # { - # "name": "swarm_driver long handling times", - # "value": '$total_num_of_times', - # "unit": "hits" - # }, - # { - # "name": "swarm_driver long handling total_time", - # "value": '$total_long_handling', - # "unit": "ms" - # }, - # { - # "name": "swarm_driver average long handling time", - # "value": '$average_handling_ms', - # "unit": "ms" - # } - # ]' > swarm_driver_long_handlings.json - - # - name: check swarm_driver_long_handlings.json - # shell: bash - # run: cat swarm_driver_long_handlings.json - - # - name: Alert for swarm_driver long handlings - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # tool: "customSmallerIsBetter" - # output-file-path: swarm_driver_long_handlings.json - # # Where the previous data file is stored - # external-data-json-path: ./cache/swarm_driver_long_handlings.json - # # Workflow will fail when an alert happens - # fail-on-alert: true - # # GitHub API token to make a commit comment - # github-token: ${{ secrets.GITHUB_TOKEN }} - # # Enable alert commit comment - # comment-on-alert: true - # # Comment on the PR - # comment-always: true - # # 200% regression will result in alert - # alert-threshold: "200%" - # # Enable Job Summary for PRs - # summary-always: true + benchmark-cli: + name: Compare autonomi_cli benchmarks to main + # right now only ubuntu, running on multiple systems would require many pushes...\ + # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing + # once to the branch.. + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + + - uses: Swatinem/rust-cache@v2 + continue-on-error: true + + ######################## + ### Setup ### + ######################## + - run: cargo install cargo-criterion + + - name: install ripgrep + run: sudo apt-get -y install ripgrep + + - name: Download 95mb file to be uploaded with the safe client + shell: bash + run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + + # As normal user won't care much about initial client startup, + # but be more alerted on communication speed during transmission. + # Meanwhile the criterion testing code includes the client startup as well, + # it will be better to execute bench test with `local`, + # to make the measurement results reflect speed improvement or regression more accurately. 
+ - name: Build binaries + run: cargo build --release --features local --bin safenode --bin autonomi + timeout-minutes: 30 + + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@main + env: + SN_LOG: "all" + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ubuntu-latest + build: true + + - name: Check SAFE_PEERS was set + shell: bash + run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + + ######################### + ### Upload large file ### + ######################### + + - name: Start a client instance to compare memory usage + shell: bash + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data.zip" + env: + SN_LOG: "all" + timeout-minutes: 5 + + - name: Cleanup uploaded_files folder to avoid pollute download benchmark + shell: bash + run: | + ls -l $CLIENT_DATA_PATH + rm -rf $CLIENT_DATA_PATH/uploaded_files + + ########################### + ### Client Mem Analysis ### + ########################### + + - name: Check client memory usage + shell: bash + run: | + client_peak_mem_limit_mb="1024" # mb + client_avg_mem_limit_mb="512" # mb + + peak_mem_usage=$( + rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | + awk -F':' '/"memory_used_mb":/{print $2}' | + sort -n | + tail -n 1 + ) + echo "Peak memory usage: $peak_mem_usage MB" + if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then + echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" + exit 1 + fi + + total_mem=$( + rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob autonomi.* -o --no-line-number --no-filename | + awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' + ) + num_of_times=$( + rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob autonomi.* -c --stats | + rg "(\d+) matches" | + rg "\d+" -o + ) + echo "num_of_times: $num_of_times" + echo "Total memory is: $total_mem" + average_mem=$(($total_mem/$(($num_of_times)))) + echo "Average memory is: $average_mem" + + if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then + echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" + exit 1 + fi + # Write the client memory usage to a file + echo '[ + { + "name": "client-peak-memory-usage-during-upload", + "value": '$peak_mem_usage', + "unit": "MB" + }, + { + "name": "client-average-memory-usage-during-upload", + "value": '$average_mem', + "unit": "MB" + } + ]' > client_memory_usage.json + + - name: check client_memory_usage.json + shell: bash + run: cat client_memory_usage.json + + - name: Alert for client memory usage + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "Memory Usage of Client during uploading large file" + tool: "customSmallerIsBetter" + output-file-path: client_memory_usage.json + # Where the previous data file is stored + external-data-json-path: ./cache/client-mem-usage.json + # Workflow will fail when an alert happens + fail-on-alert: true + # GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true + + # ######################## + # ### Benchmark ### + # 
######################## + # - name: Bench `safe` cli + # shell: bash + # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, + # # passes to tee which displays it in the terminal and writes to output.txt + # run: | + # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt + # cat output.txt | rg benchmark-complete | jq -s 'map({ + # name: (.id | split("/"))[-1], + # unit: "MiB/s", + # value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) + # })' > files-benchmark.json + # timeout-minutes: 15 + + # - name: Confirming the number of files uploaded and downloaded during the benchmark test + # shell: bash + # run: | + # ls -l $CLIENT_DATA_PATH + # ls -l $CLIENT_DATA_PATH/uploaded_files + # ls -l $CLIENT_DATA_PATH/safe_files + + # - name: Store benchmark result + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # # What benchmark tool the output.txt came from + # tool: "customBiggerIsBetter" + # output-file-path: files-benchmark.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/benchmark-data.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # - name: Start a client to carry out download to output the logs + # shell: bash + # run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick + + # - name: Start a client to simulate criterion upload + # shell: bash + # run: | + # ls -l target/release + # target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick + + ######################### + ### Stop Network ### + ######################### + + - name: Stop the local network + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_benchmark + platform: ubuntu-latest + build: true + + ######################### + ### Node Mem Analysis ### + ######################### + + # The large file uploaded will increase node's peak mem usage a lot + - name: Check node memory usage + shell: bash + run: | + node_peak_mem_limit_mb="250" # mb + peak_mem_usage=$( + rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | + awk -F':' '/"memory_used_mb":/{print $2}' | + sort -n | + tail -n 1 + ) + + echo "Memory usage: $peak_mem_usage MB" + if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then + echo "Node memory usage exceeded threshold: $peak_mem_usage MB" + exit 1 + fi + # Write the node memory usage to a file + echo '[ + { + "name": "node-memory-usage-through-safe-benchmark", + "value": '$peak_mem_usage', + "unit": "MB" + } + ]' > node_memory_usage.json + + - name: check node_memory_usage.json + shell: bash + run: cat node_memory_usage.json + + - name: Alert for node memory usage + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: node_memory_usage.json + # Where the previous data file is stored + external-data-json-path: ./cache/node-mem-usage.json + # Workflow will fail when an alert happens + fail-on-alert: true + 
# GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # Comment on the PR + comment-always: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true + + ########################################### + ### Swarm_driver handling time Analysis ### + ########################################### + + - name: Check swarm_driver handling time + shell: bash + run: | + num_of_times=$( + rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + rg "(\d+) matches" | + rg "\d+" -o + ) + echo "Number of long cmd handling times: $num_of_times" + total_long_handling_ms=$( + rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + ) + echo "Total cmd long handling time is: $total_long_handling_ms ms" + average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + echo "Average cmd long handling time is: $average_handling_ms ms" + total_long_handling=$(($total_long_handling_ms)) + total_num_of_times=$(($num_of_times)) + num_of_times=$( + rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -c --stats | + rg "(\d+) matches" | + rg "\d+" -o + ) + echo "Number of long event handling times: $num_of_times" + total_long_handling_ms=$( + rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safenode.* -o --no-line-number --no-filename | + awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + ) + echo "Total event long handling time is: $total_long_handling_ms ms" + average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + echo "Average event long handling time is: $average_handling_ms ms" + total_long_handling=$(($total_long_handling_ms+$total_long_handling)) + total_num_of_times=$(($num_of_times+$total_num_of_times)) + average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) + echo "Total swarm_driver long handling times is: $total_num_of_times" + echo "Total swarm_driver long handling duration is: $total_long_handling ms" + echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + total_num_of_times_limit_hits="30000" # hits + total_long_handling_limit_ms="400000" # ms + average_handling_limit_ms="20" # ms + if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then + echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" + exit 1 + fi + if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then + echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms" + exit 1 + fi + if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then + echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" + exit 1 + fi + + # Write the node memory usage to a file + echo '[ + { + "name": "swarm_driver long handling times", + "value": '$total_num_of_times', + "unit": "hits" + }, + { + "name": "swarm_driver long handling total_time", + "value": '$total_long_handling', + "unit": "ms" + }, + { + "name": "swarm_driver average long handling time", + "value": '$average_handling_ms', + "unit": "ms" + } + ]' > swarm_driver_long_handlings.json + + - name: check swarm_driver_long_handlings.json + shell: bash + run: cat swarm_driver_long_handlings.json + + - 
name: Alert for swarm_driver long handlings + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: swarm_driver_long_handlings.json + # Where the previous data file is stored + external-data-json-path: ./cache/swarm_driver_long_handlings.json + # Workflow will fail when an alert happens + fail-on-alert: true + # GitHub API token to make a commit comment + github-token: ${{ secrets.GITHUB_TOKEN }} + # Enable alert commit comment + comment-on-alert: true + # Comment on the PR + comment-always: true + # 200% regression will result in alert + alert-threshold: "200%" + # Enable Job Summary for PRs + summary-always: true benchmark-cash: name: Compare sn_transfer benchmarks to main From fb174b94474aa599c9232434621af44942498da1 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 21 Oct 2024 14:07:48 +0900 Subject: [PATCH 058/128] fix(networking): replication factor of 5 to reduce CPU overhead --- sn_networking/src/driver.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 1ad9c3e7a9..27cc340e8e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -65,6 +65,7 @@ use std::{ fs, io::{Read, Write}, net::SocketAddr, + num::NonZeroUsize, path::PathBuf, }; use tokio::sync::{mpsc, oneshot}; @@ -130,6 +131,13 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10); +// Init during compilation, instead of runtime error that should never happen +// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) +const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { + Some(v) => v, + None => panic!("CLOSE_GROUP_SIZE should not be zero"), +}; + /// The various settings to apply to when fetching a record from network #[derive(Clone)] pub struct GetRecordCfg { @@ -354,6 +362,7 @@ impl NetworkBuilder { .disjoint_query_paths(true) // Records never expire .set_record_ttl(None) + .set_replication_factor(REPLICATION_FACTOR) // Emit PUT events for validation prior to insertion into the RecordStore. // This is no longer needed as the record_storage::put now can carry out validation. // .set_record_filtering(KademliaStoreInserts::FilterBoth) @@ -437,6 +446,7 @@ impl NetworkBuilder { let _ = kad_cfg .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual) .set_max_packet_size(MAX_PACKET_SIZE) + .set_replication_factor(REPLICATION_FACTOR) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. 
.disjoint_query_paths(true); From 9523ce03485ea01438c888c81bdb95d78e51a956 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 22 Oct 2024 15:33:26 +0900 Subject: [PATCH 059/128] feat: local user data in CLI, list commands --- Cargo.lock | 1 + autonomi-cli/Cargo.toml | 1 + autonomi-cli/src/access/data_dir.rs | 13 +++- autonomi-cli/src/access/mod.rs | 1 + autonomi-cli/src/access/user_data.rs | 91 +++++++++++++++++++++++++++ autonomi-cli/src/commands.rs | 4 +- autonomi-cli/src/commands/file.rs | 44 ++++++++----- autonomi-cli/src/commands/register.rs | 43 ++++++++----- autonomi-cli/src/main.rs | 1 + 9 files changed, 166 insertions(+), 33 deletions(-) create mode 100644 autonomi-cli/src/access/user_data.rs diff --git a/Cargo.lock b/Cargo.lock index 2bb7a1990b..cba76198d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1138,6 +1138,7 @@ dependencies = [ "tempfile", "tokio", "tracing", + "walkdir", ] [[package]] diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 70b361d4be..20c26e43d6 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -47,6 +47,7 @@ tracing = { version = "~0.1.26" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } sn_build_info = { path = "../sn_build_info", version = "0.1.16" } sn_logging = { path = "../sn_logging", version = "0.2.37" } +walkdir = "2.5.0" [dev-dependencies] autonomi = { path = "../autonomi", version = "0.2.0", features = [ diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs index b694d3f6fb..9233507264 100644 --- a/autonomi-cli/src/access/data_dir.rs +++ b/autonomi-cli/src/access/data_dir.rs @@ -6,7 +6,10 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use color_eyre::eyre::{eyre, Context, Result}; +use color_eyre::{ + eyre::{eyre, Context, Result}, + Section, +}; use std::path::PathBuf; pub fn get_client_data_dir_path() -> Result { @@ -14,6 +17,12 @@ pub fn get_client_data_dir_path() -> Result { .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; home_dirs.push("safe"); home_dirs.push("autonomi"); - std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; + std::fs::create_dir_all(home_dirs.as_path()) + .wrap_err("Failed to create data dir") + .with_suggestion(|| { + format!( + "make sure you have the correct permissions to access the data dir: {home_dirs:?}" + ) + })?; Ok(home_dirs) } diff --git a/autonomi-cli/src/access/mod.rs b/autonomi-cli/src/access/mod.rs index ac80eeca88..327dc6db51 100644 --- a/autonomi-cli/src/access/mod.rs +++ b/autonomi-cli/src/access/mod.rs @@ -9,3 +9,4 @@ pub mod data_dir; pub mod keys; pub mod network; +pub mod user_data; diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs new file mode 100644 index 0000000000..e684c44d78 --- /dev/null +++ b/autonomi-cli/src/access/user_data.rs @@ -0,0 +1,91 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::collections::HashMap;
+
+use autonomi::client::{
+    address::{addr_to_str, str_to_addr},
+    archive::ArchiveAddr,
+    registers::RegisterAddress,
+    vault_user_data::UserData,
+};
+use color_eyre::eyre::Result;
+
+use super::{data_dir::get_client_data_dir_path, keys::get_register_signing_key};
+
+pub fn get_local_user_data() -> Result<UserData> {
+    let register_key = get_register_signing_key()?;
+    let registers = get_local_registers()?;
+    let file_archives = get_local_file_archives()?;
+
+    let user_data = UserData {
+        register_sk: Some(register_key.to_hex()),
+        registers,
+        file_archives,
+    };
+    Ok(user_data)
+}
+
+pub fn get_local_registers() -> Result<HashMap<RegisterAddress, String>> {
+    let data_dir = get_client_data_dir_path()?;
+    let user_data_path = data_dir.join("user_data");
+    let registers_path = user_data_path.join("registers");
+    std::fs::create_dir_all(&registers_path)?;
+
+    let mut registers = HashMap::new();
+    for entry in walkdir::WalkDir::new(registers_path)
+        .min_depth(1)
+        .max_depth(1)
+    {
+        let entry = entry?;
+        let file_name = entry.file_name().to_string_lossy();
+        let register_address = RegisterAddress::from_hex(&file_name)?;
+        let file_content = std::fs::read_to_string(entry.path())?;
+        let register_name = file_content;
+        registers.insert(register_address, register_name);
+    }
+    Ok(registers)
+}
+
+pub fn get_local_file_archives() -> Result<HashMap<ArchiveAddr, String>> {
+    let data_dir = get_client_data_dir_path()?;
+    let user_data_path = data_dir.join("user_data");
+    let file_archives_path = user_data_path.join("file_archives");
+    std::fs::create_dir_all(&file_archives_path)?;
+
+    let mut file_archives = HashMap::new();
+    for entry in walkdir::WalkDir::new(file_archives_path)
+        .min_depth(1)
+        .max_depth(1)
+    {
+        let entry = entry?;
+        let file_name = entry.file_name().to_string_lossy();
+        let file_archive_address = str_to_addr(&file_name)?;
+        let file_archive_name = std::fs::read_to_string(entry.path())?;
+        file_archives.insert(file_archive_address, file_archive_name);
+    }
+    Ok(file_archives)
+}
+
+pub fn write_local_register(register: &RegisterAddress, name: &str) -> Result<()> {
+    let data_dir = get_client_data_dir_path()?;
+    let user_data_path = data_dir.join("user_data");
+    let registers_path = user_data_path.join("registers");
+    std::fs::create_dir_all(&registers_path)?;
+    std::fs::write(registers_path.join(register.to_hex()), name)?;
+    Ok(())
+}
+
+pub fn write_local_file_archive(archive: &ArchiveAddr, name: &str) -> Result<()> {
+    let data_dir = get_client_data_dir_path()?;
+    let user_data_path = data_dir.join("user_data");
+    let file_archives_path = user_data_path.join("file_archives");
+    std::fs::create_dir_all(&file_archives_path)?;
+    std::fs::write(file_archives_path.join(addr_to_str(*archive)), name)?;
+    Ok(())
+}
diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs
index bb718df43a..6beeaf4405 100644
--- a/autonomi-cli/src/commands.rs
+++ b/autonomi-cli/src/commands.rs
@@ -140,7 +140,7 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> {
             FileCmd::Download { addr, dest_file } => {
                 file::download(&addr, &dest_file, peers.await?).await
             }
-            FileCmd::List => file::list(peers.await?),
+            FileCmd::List => file::list(),
         },
         SubCmd::Register { command } => match command {
             RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite),
@@ -156,7 +156,7 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> {
                 value,
             } => register::edit(address, name,
&value, peers.await?).await, RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await, - RegisterCmd::List => register::list(peers.await?), + RegisterCmd::List => register::list(), }, SubCmd::Vault { command } => match command { VaultCmd::Cost => vault::cost(peers.await?), diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index f3c49d8cb4..1e1073b9e1 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -11,6 +11,7 @@ use autonomi::client::address::addr_to_str; use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; +use color_eyre::Section; use std::path::PathBuf; pub async fn cost(file: &str, peers: Vec) -> Result<()> { @@ -28,6 +29,7 @@ pub async fn cost(file: &str, peers: Vec) -> Result<()> { info!("Total cost: {cost} for file: {file}"); Ok(()) } + pub async fn upload(file: &str, peers: Vec) -> Result<()> { let wallet = crate::keys::load_evm_wallet()?; let mut client = crate::actions::connect_to_network(peers).await?; @@ -43,28 +45,42 @@ pub async fn upload(file: &str, peers: Vec) -> Result<()> { .wrap_err("Failed to upload file")?; let addr = addr_to_str(xor_name); - println!("Successfully uploaded: {file}"); - println!("At address: {addr}"); - info!("Successfully uploaded: {file} at address: {addr}"); - if let Ok(()) = upload_completed_tx.send(()) { - let summary = upload_summary_thread.await?; - if summary.record_count == 0 { - println!("All chunks already exist on the network"); - } else { - println!("Number of chunks uploaded: {}", summary.record_count); - println!("Total cost: {} AttoTokens", summary.tokens_spent); - } - info!("Summary for upload of file {file} at {addr:?}: {summary:?}"); + if let Err(e) = upload_completed_tx.send(()) { + error!("Failed to send upload completed event: {e:?}"); + eprintln!("Failed to send upload completed event: {e:?}"); + } + + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("All chunks already exist on the network."); + } else { + println!("Successfully uploaded: {file}"); + println!("At address: {addr}"); + info!("Successfully uploaded: {file} at address: {addr}"); + println!("Number of chunks uploaded: {}", summary.record_count); + println!("Total cost: {} AttoTokens", summary.tokens_spent); } + info!("Summary for upload of file {file} at {addr:?}: {summary:?}"); + + crate::user_data::write_local_file_archive(&xor_name, file) + .wrap_err("Failed to save file to local user data") + .with_suggestion(|| "Local user data saves the file address above to disk, without it you need to keep track of the address yourself")?; + info!("Saved file to local user data"); Ok(()) } + pub async fn download(addr: &str, dest_path: &str, peers: Vec) -> Result<()> { let mut client = crate::actions::connect_to_network(peers).await?; crate::actions::download(addr, dest_path, &mut client).await } -pub fn list(_peers: Vec) -> Result<()> { - println!("The file list feature is coming soon!"); +pub fn list() -> Result<()> { + println!("Retrieving local user data..."); + let file_archives = crate::user_data::get_local_file_archives()?; + println!("✅ You have {} file archive(s):", file_archives.len()); + for (addr, name) in file_archives { + println!("{}: {}", name, addr_to_str(addr)); + } Ok(()) } diff --git a/autonomi-cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs index b5be4f0230..0fdd9437ea 100644 --- a/autonomi-cli/src/commands/register.rs +++ 
b/autonomi-cli/src/commands/register.rs @@ -90,20 +90,28 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec let address = register.address(); - println!("✅ Register created at address: {address}"); - println!("With name: {name}"); - println!("And initial value: [{value}]"); - info!("✅ Register created at address: {address} with name: {name}"); - - if let Ok(()) = upload_completed_tx.send(()) { - let summary = upload_summary_thread.await?; - if summary.record_count == 0 { - println!("The register was already created on the network. No tokens were spent."); - } else { - println!("Total cost: {} AttoTokens", summary.tokens_spent); - } - info!("Summary of register creation: {summary:?}"); + if let Err(e) = upload_completed_tx.send(()) { + error!("Failed to send upload completed event: {e:?}"); + eprintln!("Failed to send upload completed event: {e:?}"); + } + + let summary = upload_summary_thread.await?; + if summary.record_count == 0 { + println!("✅ The register already exists on the network at address: {address}."); + println!("No tokens were spent."); + } else { + println!("✅ Register created at address: {address}"); + println!("With name: {name}"); + println!("And initial value: [{value}]"); + info!("Register created at address: {address} with name: {name}"); + println!("Total cost: {} AttoTokens", summary.tokens_spent); } + info!("Summary of register creation: {summary:?}"); + + crate::user_data::write_local_register(address, name) + .wrap_err("Failed to save register to local user data") + .with_suggestion(|| "Local user data saves the register address above to disk, without it you need to keep track of the address yourself")?; + info!("Saved register to local user data"); Ok(()) } @@ -183,7 +191,12 @@ pub async fn get(address: String, name: bool, peers: Vec) -> Result<( Ok(()) } -pub fn list(_peers: Vec) -> Result<()> { - println!("The register feature is coming soon!"); +pub fn list() -> Result<()> { + println!("Retrieving local user data..."); + let registers = crate::user_data::get_local_registers()?; + println!("✅ You have {} register(s):", registers.len()); + for (addr, name) in registers { + println!("{}: {}", name, addr.to_hex()); + } Ok(()) } diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs index de4cdcf4c4..0953d81d1d 100644 --- a/autonomi-cli/src/main.rs +++ b/autonomi-cli/src/main.rs @@ -18,6 +18,7 @@ mod utils; pub use access::data_dir; pub use access::keys; pub use access::network; +pub use access::user_data; use clap::Parser; use color_eyre::Result; From 893dd2df5ae56440cbb3edf3b1597d4330f84076 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 23 Oct 2024 18:57:12 +0900 Subject: [PATCH 060/128] feat: vault in CLI and vault key generation from evm sk --- autonomi-cli/src/access/keys.rs | 7 ++ autonomi-cli/src/access/user_data.rs | 25 +++++- autonomi-cli/src/commands.rs | 22 ++++- autonomi-cli/src/commands/file.rs | 9 +- autonomi-cli/src/commands/vault.rs | 89 +++++++++++++++++-- autonomi/Cargo.toml | 1 + autonomi/src/client/mod.rs | 2 - autonomi/src/client/vault.rs | 46 ++++++++-- autonomi/src/client/vault/key.rs | 43 +++++++++ .../user_data.rs} | 41 ++++----- evmlib/src/cryptography.rs | 48 ++++++++++ sn_evm/src/lib.rs | 1 + 12 files changed, 288 insertions(+), 46 deletions(-) create mode 100644 autonomi/src/client/vault/key.rs rename autonomi/src/client/{vault_user_data.rs => vault/user_data.rs} (86%) diff --git a/autonomi-cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs index 18310f4831..ecdc5aee10 100644 --- 
a/autonomi-cli/src/access/keys.rs +++ b/autonomi-cli/src/access/keys.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use autonomi::client::registers::RegisterSecretKey; +use autonomi::client::vault::VaultSecretKey; use autonomi::{get_evm_network_from_env, Wallet}; use color_eyre::eyre::{Context, Result}; use color_eyre::Section; @@ -51,6 +52,12 @@ pub fn get_secret_key() -> Result { .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key") } +pub fn get_vault_secret_key() -> Result { + let secret_key = get_secret_key()?; + autonomi::client::vault::derive_vault_key(&secret_key) + .wrap_err("Failed to derive vault secret key from EVM secret key") +} + pub fn create_register_signing_key_file(key: RegisterSecretKey) -> Result { let dir = super::data_dir::get_client_data_dir_path() .wrap_err("Could not access directory to write key to")?; diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs index e684c44d78..ab3c791ce2 100644 --- a/autonomi-cli/src/access/user_data.rs +++ b/autonomi-cli/src/access/user_data.rs @@ -11,12 +11,15 @@ use std::collections::HashMap; use autonomi::client::{ address::{addr_to_str, str_to_addr}, archive::ArchiveAddr, - registers::RegisterAddress, - vault_user_data::UserData, + registers::{RegisterAddress, RegisterSecretKey}, + vault::UserData, }; use color_eyre::eyre::Result; -use super::{data_dir::get_client_data_dir_path, keys::get_register_signing_key}; +use super::{ + data_dir::get_client_data_dir_path, + keys::{create_register_signing_key_file, get_register_signing_key}, +}; pub fn get_local_user_data() -> Result { let register_key = get_register_signing_key()?; @@ -72,6 +75,22 @@ pub fn get_local_file_archives() -> Result> { Ok(file_archives) } +pub fn write_local_user_data(user_data: &UserData) -> Result<()> { + if let Some(register_key) = &user_data.register_sk { + let sk = RegisterSecretKey::from_hex(register_key)?; + create_register_signing_key_file(sk)?; + } + + for (register, name) in user_data.registers.iter() { + write_local_register(register, name)?; + } + + for (archive, name) in user_data.file_archives.iter() { + write_local_file_archive(archive, name)?; + } + Ok(()) +} + pub fn write_local_register(register: &RegisterAddress, name: &str) -> Result<()> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index 6beeaf4405..06adb34006 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -123,10 +123,23 @@ pub enum VaultCmd { Cost, /// Create a vault at a deterministic address based on your `SECRET_KEY`. + /// Pushing an encrypted backup of your local user data to the network Create, + /// Load an existing vault from the network. + /// Use this when loading your user data to a new device. + /// You need to have your original `SECRET_KEY` to load the vault. + Load, + /// Sync vault with the network, including registers and files. - Sync, + /// Loads existing user data from the network and merges it with your local user data. + /// Pushes your local user data to the network. + Sync { + /// Force push your local user data to the network. + /// This will overwrite any existing data in your vault. 
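+        /// (Illustrative invocation, assuming the installed binary name: `autonomi vault sync` merges with the network copy, while `autonomi vault sync --force` overwrites it.)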
+ #[arg(short, long)] + force: bool, + }, } pub async fn handle_subcommand(opt: Opt) -> Result<()> { @@ -159,9 +172,10 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { RegisterCmd::List => register::list(), }, SubCmd::Vault { command } => match command { - VaultCmd::Cost => vault::cost(peers.await?), - VaultCmd::Create => vault::create(peers.await?), - VaultCmd::Sync => vault::sync(peers.await?), + VaultCmd::Cost => vault::cost(peers.await?).await, + VaultCmd::Create => vault::create(peers.await?).await, + VaultCmd::Load => vault::load(peers.await?).await, + VaultCmd::Sync { force } => vault::sync(peers.await?, force).await, }, } } diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index 1e1073b9e1..faf21137e6 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -39,8 +39,13 @@ pub async fn upload(file: &str, peers: Vec) -> Result<()> { println!("Uploading data to network..."); info!("Uploading file: {file}"); + let dir_path = PathBuf::from(file); + let name = dir_path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or(file.to_string()); let xor_name = client - .dir_upload(PathBuf::from(file), &wallet) + .dir_upload(dir_path, &wallet) .await .wrap_err("Failed to upload file")?; let addr = addr_to_str(xor_name); @@ -62,7 +67,7 @@ pub async fn upload(file: &str, peers: Vec) -> Result<()> { } info!("Summary for upload of file {file} at {addr:?}: {summary:?}"); - crate::user_data::write_local_file_archive(&xor_name, file) + crate::user_data::write_local_file_archive(&xor_name, &name) .wrap_err("Failed to save file to local user data") .with_suggestion(|| "Local user data saves the file address above to disk, without it you need to keep track of the address yourself")?; info!("Saved file to local user data"); diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs index 9a8d708824..9888366eec 100644 --- a/autonomi-cli/src/commands/vault.rs +++ b/autonomi-cli/src/commands/vault.rs @@ -7,19 +7,96 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use autonomi::Multiaddr; +use color_eyre::eyre::Context; use color_eyre::eyre::Result; +use color_eyre::Section; -pub fn cost(_peers: Vec) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn cost(peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Getting cost to create a new vault..."); + let total_cost = client.vault_cost(&vault_sk).await?; + + if total_cost.is_zero() { + println!("Vault already exists, modifying an existing vault is free"); + } else { + println!("Cost to create a new vault: {total_cost} AttoTokens"); + } Ok(()) } -pub fn create(_peers: Vec) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn create(peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let wallet = crate::keys::load_evm_wallet()?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Retrieving local user data..."); + let local_user_data = crate::user_data::get_local_user_data()?; + let file_archives_len = local_user_data.file_archives.len(); + let registers_len = local_user_data.registers.len(); + + println!("Pushing to network vault..."); + let total_cost = client + .put_user_data_to_vault(&vault_sk, &wallet, local_user_data) + .await?; + + if total_cost.is_zero() { + println!("✅ Successfully pushed user data to existing vault"); + } else { + println!("✅ Successfully created new vault containing local user data"); + } + + println!("Total cost: {total_cost} AttoTokens"); + println!("Vault contains {file_archives_len} file archive(s) and {registers_len} register(s)"); Ok(()) } -pub fn sync(_peers: Vec) -> Result<()> { - println!("The vault feature is coming soon!"); +pub async fn sync(peers: Vec, force: bool) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + let wallet = crate::keys::load_evm_wallet()?; + + println!("Fetching vault from network..."); + let net_user_data = client + .get_user_data_from_vault(&vault_sk) + .await + .wrap_err("Failed to fetch vault from network") + .with_suggestion(|| "Make sure you have already created a vault on the network")?; + + if force { + println!("The force flag was provided, overwriting user data in the vault with local user data..."); + } else { + println!("Syncing vault with local user data..."); + crate::user_data::write_local_user_data(&net_user_data)?; + } + + println!("Pushing local user data to network vault..."); + let local_user_data = crate::user_data::get_local_user_data()?; + let file_archives_len = local_user_data.file_archives.len(); + let registers_len = local_user_data.registers.len(); + client + .put_user_data_to_vault(&vault_sk, &wallet, local_user_data) + .await?; + + println!("✅ Successfully synced vault"); + println!("Vault contains {file_archives_len} file archive(s) and {registers_len} register(s)"); + Ok(()) +} + +pub async fn load(peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let vault_sk = crate::keys::get_vault_secret_key()?; + + println!("Retrieving vault from network..."); + let user_data = client.get_user_data_from_vault(&vault_sk).await?; + println!("Writing user data to disk..."); + crate::user_data::write_local_user_data(&user_data)?; + + println!( + "✅ Successfully loaded vault with {} file archive(s) and {} register(s)", + user_data.file_archives.len(), + user_data.registers.len() 
+ ); Ok(()) } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index b5b24fe99b..8a103dd6f2 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -52,6 +52,7 @@ futures = "0.3.30" wasm-bindgen = "0.2.93" wasm-bindgen-futures = "0.4.43" serde-wasm-bindgen = "0.6.5" +sha2 = "0.10.6" [dev-dependencies] alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 9aa9fc2f48..4771d19e2a 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -20,8 +20,6 @@ pub mod fs; pub mod registers; #[cfg(feature = "vault")] pub mod vault; -#[cfg(feature = "vault")] -pub mod vault_user_data; #[cfg(target_arch = "wasm32")] pub mod wasm; diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 4004a3d530..bc514d2f79 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -6,22 +6,28 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use std::collections::HashSet; -use std::hash::{DefaultHasher, Hash, Hasher}; +pub mod key; +pub mod user_data; + +pub use key::{derive_vault_key, VaultSecretKey}; +pub use user_data::UserData; use crate::client::data::PutError; use crate::client::Client; -use bls::SecretKey; use libp2p::kad::{Quorum, Record}; -use sn_evm::EvmWallet; +use sn_evm::{Amount, AttoTokens, EvmWallet}; use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; use sn_protocol::storage::{ try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress, }; use sn_protocol::Bytes; use sn_protocol::{storage::try_deserialize_record, NetworkAddress}; +use std::collections::HashSet; +use std::hash::{DefaultHasher, Hash, Hasher}; use tracing::info; +use super::data::CostError; + #[derive(Debug, thiserror::Error)] pub enum VaultError { #[error("Could not generate Vault secret key from entropy: {0:?}")] @@ -53,7 +59,7 @@ impl Client { /// Returns the content type of the bytes in the vault pub async fn fetch_and_decrypt_vault( &self, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, ) -> Result<(Bytes, VaultContentType), VaultError> { info!("Fetching and decrypting vault"); let pad = self.get_vault_from_network(secret_key).await?; @@ -65,7 +71,7 @@ impl Client { /// Gets the vault Scratchpad from a provided client public key async fn get_vault_from_network( &self, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, ) -> Result { let client_pk = secret_key.public_key(); @@ -96,6 +102,26 @@ impl Client { Ok(pad) } + /// Get the cost of creating a new vault + pub async fn vault_cost(&self, owner: &VaultSecretKey) -> Result { + info!("Getting cost for vault"); + let client_pk = owner.public_key(); + let content_type = Default::default(); + let scratch = Scratchpad::new(client_pk, content_type); + let vault_xor = scratch.network_address().as_xorname().unwrap_or_default(); + + // NB TODO: vault should be priced differently from other data + let cost_map = self.get_store_quotes(std::iter::once(vault_xor)).await?; + let total_cost = AttoTokens::from_atto( + cost_map + .values() + .map(|quote| quote.2.cost.as_atto()) + .sum::(), + ); + + Ok(total_cost) + } + /// Put data into the client's VaultPacket /// /// Pays for a new VaultPacket if none yet created for the 
client. @@ -105,9 +131,10 @@ impl Client { &self, data: Bytes, wallet: &EvmWallet, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, content_type: VaultContentType, - ) -> Result<(), PutError> { + ) -> Result { + let mut total_cost = AttoTokens::zero(); let client_pk = secret_key.public_key(); let pad_res = self.get_vault_from_network(secret_key).await; @@ -148,6 +175,7 @@ impl Client { let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; // Should always be there, else it would have failed on the payment step. let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + total_cost = proof.quote.cost; Record { key: scratch_key, @@ -200,6 +228,6 @@ impl Client { ) })?; - Ok(()) + Ok(total_cost) } } diff --git a/autonomi/src/client/vault/key.rs b/autonomi/src/client/vault/key.rs new file mode 100644 index 0000000000..9f555a7701 --- /dev/null +++ b/autonomi/src/client/vault/key.rs @@ -0,0 +1,43 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use sha2::{Digest, Sha256}; + +/// Secret key to decrypt vault content +pub type VaultSecretKey = bls::SecretKey; + +#[derive(Debug, thiserror::Error)] +pub enum VaultKeyError { + #[error("Failed to sign message: {0}")] + FailedToSignMessage(#[from] sn_evm::cryptography::SignError), + #[error("Failed to generate vault secret key: {0}")] + FailedToGenerateVaultSecretKey(String), +} + +/// Message used to generate the vault secret key from the EVM secret key +const VAULT_SECRET_KEY_SEED: &[u8] = b"Massive Array of Internet Disks Secure Access For Everyone"; + +/// Derives the vault secret key from the EVM secret key hex string +/// The EVM secret key is used to sign a message and the signature is hashed to derive the vault secret key +/// Being able to derive the vault secret key from the EVM secret key allows users to only keep track of one key: the EVM secret key +pub fn derive_vault_key(evm_sk_hex: &str) -> Result { + let signature = sn_evm::cryptography::sign_message(evm_sk_hex, VAULT_SECRET_KEY_SEED) + .map_err(VaultKeyError::FailedToSignMessage)?; + let hash = hash_to_32b(&signature); + + // NB TODO: not sure this is safe, we should ask Mav or find a better way to do this! 
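+    // Note: `derive_child` below starts from the fixed default root key, so the derived key's secrecy rests entirely on the secrecy of the hashed signature above.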
+ let root_sk = bls::SecretKey::default(); + let unique_key = root_sk.derive_child(&hash); + Ok(unique_key) +} + +fn hash_to_32b(msg: &[u8]) -> [u8; 32] { + let mut sha = Sha256::new(); + sha.update(msg); + sha.finalize().into() +} diff --git a/autonomi/src/client/vault_user_data.rs b/autonomi/src/client/vault/user_data.rs similarity index 86% rename from autonomi/src/client/vault_user_data.rs rename to autonomi/src/client/vault/user_data.rs index e40e73e260..736bd6292d 100644 --- a/autonomi/src/client/vault_user_data.rs +++ b/autonomi/src/client/vault/user_data.rs @@ -8,18 +8,17 @@ use std::collections::HashMap; -use super::archive::ArchiveAddr; -use super::data::GetError; -use super::data::PutError; -use super::registers::RegisterAddress; -use super::vault::VaultError; -use super::Client; -use crate::client::vault::{app_name_to_vault_content_type, VaultContentType}; -use bls::SecretKey; +use crate::client::archive::ArchiveAddr; +use crate::client::data::GetError; +use crate::client::data::PutError; +use crate::client::registers::RegisterAddress; +use crate::client::vault::VaultError; +use crate::client::vault::{app_name_to_vault_content_type, VaultContentType, VaultSecretKey}; +use crate::client::Client; use serde::{Deserialize, Serialize}; +use sn_evm::AttoTokens; use sn_evm::EvmWallet; use sn_protocol::Bytes; - use std::sync::LazyLock; /// Vault content type for UserDataVault @@ -95,7 +94,7 @@ impl Client { /// Get the user data from the vault pub async fn get_user_data_from_vault( &self, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, ) -> Result { let (bytes, content_type) = self.fetch_and_decrypt_vault(secret_key).await?; @@ -115,22 +114,24 @@ impl Client { } /// Put the user data to the vault + /// Returns the total cost of the put operation pub async fn put_user_data_to_vault( &self, - secret_key: &SecretKey, + secret_key: &VaultSecretKey, wallet: &EvmWallet, user_data: UserData, - ) -> Result<(), PutError> { + ) -> Result { let bytes = user_data .to_bytes() .map_err(|e| PutError::Serialization(format!("Failed to serialize user data: {e}")))?; - self.write_bytes_to_vault( - bytes, - wallet, - secret_key, - *USER_DATA_VAULT_CONTENT_IDENTIFIER, - ) - .await?; - Ok(()) + let total_cost = self + .write_bytes_to_vault( + bytes, + wallet, + secret_key, + *USER_DATA_VAULT_CONTENT_IDENTIFIER, + ) + .await?; + Ok(total_cost) } } diff --git a/evmlib/src/cryptography.rs b/evmlib/src/cryptography.rs index ddc0149b43..02870942d9 100644 --- a/evmlib/src/cryptography.rs +++ b/evmlib/src/cryptography.rs @@ -8,8 +8,56 @@ use crate::common::Hash; use alloy::primitives::keccak256; +use alloy::signers::k256::ecdsa::{signature, RecoveryId, Signature, SigningKey}; +use alloy::signers::local::PrivateKeySigner; /// Hash data using Keccak256. pub fn hash>(data: T) -> Hash { keccak256(data.as_ref()) } + +/// Sign error +#[derive(Debug, thiserror::Error)] +pub enum SignError { + #[error("Failed to parse EVM secret key: {0}")] + InvalidEvmSecretKey(String), + #[error("Failed to sign message: {0}")] + Signature(#[from] signature::Error), +} + +/// Sign a message with an EVM secret key. 
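+/// The message is prefixed and hashed as an Ethereum signed message before signing; the returned bytes are the 64-byte r||s signature, without the recovery id.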
+pub fn sign_message(evm_secret_key_str: &str, message: &[u8]) -> Result, SignError> { + let signer: PrivateKeySigner = + evm_secret_key_str + .parse::() + .map_err(|err| { + error!("Error parsing EVM secret key: {err}"); + SignError::InvalidEvmSecretKey(err.to_string()) + })?; + + let message_hash = to_eth_signed_message_hash(message); + let (signature, _) = sign_message_recoverable(&signer.into_credential(), message_hash)?; + Ok(signature.to_vec()) +} + +/// Hash a message using Keccak256, then add the Ethereum prefix and hash it again. +fn to_eth_signed_message_hash>(message: T) -> [u8; 32] { + const PREFIX: &str = "\x19Ethereum Signed Message:\n32"; + + let hashed_message = hash(message); + + let mut eth_message = Vec::with_capacity(PREFIX.len() + 32); + eth_message.extend_from_slice(PREFIX.as_bytes()); + eth_message.extend_from_slice(hashed_message.as_slice()); + + hash(eth_message).into() +} + +/// Sign a message with a recoverable public key. +fn sign_message_recoverable>( + secret_key: &SigningKey, + message: T, +) -> Result<(Signature, RecoveryId), signature::Error> { + let hash = to_eth_signed_message_hash(message); + secret_key.sign_prehash_recoverable(&hash) +} diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index fedb1afe68..49956db39e 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -13,6 +13,7 @@ pub use evmlib::common::Address as RewardsAddress; pub use evmlib::common::Address as EvmAddress; pub use evmlib::common::QuotePayment; pub use evmlib::common::{QuoteHash, TxHash}; +pub use evmlib::cryptography; #[cfg(feature = "external-signer")] pub use evmlib::external_signer; pub use evmlib::utils; From fd318ca488e5e9e75e440fd378a8c24ea9afb8c8 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 23 Oct 2024 20:32:48 +0900 Subject: [PATCH 061/128] fix: vault key security --- Cargo.lock | 36 ++++++++++++++++++++++++--- autonomi/Cargo.toml | 2 ++ autonomi/src/client/data.rs | 2 ++ autonomi/src/client/vault.rs | 7 ++++++ autonomi/src/client/vault/key.rs | 31 ++++++++++++++++------- sn_node/src/put_validation.rs | 3 ++- sn_protocol/src/storage/scratchpad.rs | 14 +++++++++++ 7 files changed, 82 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cba76198d1..0c7bc96529 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1080,6 +1080,8 @@ version = "0.2.0" dependencies = [ "alloy", "bip39", + "blst", + "blstrs 0.7.1", "blsttc", "bytes", "console_error_panic_hook", @@ -1421,7 +1423,23 @@ dependencies = [ "byte-slice-cast", "ff 0.12.1", "group 0.12.1", - "pairing", + "pairing 0.22.0", + "rand_core 0.6.4", + "serde", + "subtle", +] + +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.13.0", + "group 0.13.0", + "pairing 0.23.0", "rand_core 0.6.4", "serde", "subtle", @@ -1434,12 +1452,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1186a39763321a0b73d1a10aa4fc067c5d042308509e8f6cc31d2c2a7ac61ac2" dependencies = [ "blst", - "blstrs", + "blstrs 0.6.2", "ff 0.12.1", "group 0.12.1", "hex 0.4.3", "hex_fmt", - "pairing", + "pairing 0.22.0", "rand 0.8.5", "rand_chacha 0.3.1", "serde", @@ -2847,6 +2865,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec", "rand_core 0.6.4", "subtle", ] @@ -3780,7 +3799,9 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", + "rand 0.8.5", "rand_core 0.6.4", + "rand_xorshift 0.3.0", "subtle", ] @@ -6083,6 +6104,15 @@ dependencies = [ "group 0.12.1", ] +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group 0.13.0", +] + [[package]] name = "pairing-plus" version = "0.19.0" diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 8a103dd6f2..08c0552dff 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -53,6 +53,8 @@ wasm-bindgen = "0.2.93" wasm-bindgen-futures = "0.4.43" serde-wasm-bindgen = "0.6.5" sha2 = "0.10.6" +blst = "0.3.13" +blstrs = "0.7.1" [dev-dependencies] alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index ce5cc4492d..903bd3f1ca 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -45,6 +45,8 @@ pub enum PutError { Serialization(String), #[error("A wallet error occurred.")] Wallet(#[from] sn_evm::EvmError), + #[error("The vault owner key does not match the client's public key")] + VaultBadOwner, } /// Errors that can occur during the pay operation. diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index bc514d2f79..eb6655cc24 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -149,6 +149,11 @@ impl Client { ); is_new = false; + + if existing_data.owner() != &client_pk { + return Err(PutError::VaultBadOwner); + } + existing_data } else { trace!("new scratchpad creation"); @@ -156,6 +161,8 @@ impl Client { }; let _next_count = scratch.update_and_sign(data, secret_key); + debug_assert!(scratch.is_valid(), "Must be valid after being signed. This is a bug, please report it by opening an issue on our github"); + let scratch_address = scratch.network_address(); let scratch_key = scratch_address.to_record_key(); diff --git a/autonomi/src/client/vault/key.rs b/autonomi/src/client/vault/key.rs index 9f555a7701..e88fd12ef7 100644 --- a/autonomi/src/client/vault/key.rs +++ b/autonomi/src/client/vault/key.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
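+// blst's `key_gen` implements the IETF BLS KeyGen (HKDF-based) derivation, turning the hashed signature seed into a well-distributed secret key.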
+use blst::min_pk::SecretKey as BlstSecretKey; use sha2::{Digest, Sha256}; /// Secret key to decrypt vault content @@ -17,6 +18,10 @@ pub enum VaultKeyError { FailedToSignMessage(#[from] sn_evm::cryptography::SignError), #[error("Failed to generate vault secret key: {0}")] FailedToGenerateVaultSecretKey(String), + #[error("Failed to convert blst secret key to blsttc secret key: {0}")] + BlsConversionError(#[from] bls::Error), + #[error("Failed to generate blst secret key")] + KeyGenerationError, } /// Message used to generate the vault secret key from the EVM secret key @@ -28,16 +33,24 @@ const VAULT_SECRET_KEY_SEED: &[u8] = b"Massive Array of Internet Disks Secure Ac pub fn derive_vault_key(evm_sk_hex: &str) -> Result { let signature = sn_evm::cryptography::sign_message(evm_sk_hex, VAULT_SECRET_KEY_SEED) .map_err(VaultKeyError::FailedToSignMessage)?; - let hash = hash_to_32b(&signature); - // NB TODO: not sure this is safe, we should ask Mav or find a better way to do this! - let root_sk = bls::SecretKey::default(); - let unique_key = root_sk.derive_child(&hash); - Ok(unique_key) + let blst_key = derive_secret_key_from_seed(&signature)?; + let vault_sk = blst_to_blsttc(&blst_key)?; + Ok(vault_sk) } -fn hash_to_32b(msg: &[u8]) -> [u8; 32] { - let mut sha = Sha256::new(); - sha.update(msg); - sha.finalize().into() +/// Convert a blst secret key to a blsttc secret key and pray that endianness is the same +fn blst_to_blsttc(sk: &BlstSecretKey) -> Result { + let sk_bytes = sk.to_bytes(); + let sk = bls::SecretKey::from_bytes(sk_bytes).map_err(VaultKeyError::BlsConversionError)?; + Ok(sk) +} + +fn derive_secret_key_from_seed(seed: &[u8]) -> Result { + let mut hasher = Sha256::new(); + hasher.update(seed); + let hashed_seed = hasher.finalize(); + let sk = + BlstSecretKey::key_gen(&hashed_seed, &[]).map_err(|_| VaultKeyError::KeyGenerationError)?; + Ok(sk) } diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 94a1e2aebb..7ff575634d 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -396,7 +396,8 @@ impl Node { ) -> Result<()> { // owner PK is defined herein, so as long as record key and this match, we're good let addr = scratchpad.address(); - debug!("Validating and storing scratchpad {addr:?}"); + let count = scratchpad.count(); + debug!("Validating and storing scratchpad {addr:?} with count {count}"); // check if the deserialized value's RegisterAddress matches the record's key let scratchpad_key = NetworkAddress::ScratchpadAddress(*addr).to_record_key(); diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index 5c99cbdcac..94b5c633a5 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -141,3 +141,17 @@ impl Scratchpad { self.encrypted_data.len() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scratchpad_is_valid() { + let sk = SecretKey::random(); + let pk = sk.public_key(); + let mut scratchpad = Scratchpad::new(pk, 42); + scratchpad.update_and_sign(Bytes::from_static(b"data to be encrypted"), &sk); + assert!(scratchpad.is_valid()); + } +} From f3c7d977407ab643eec6e886d303f54d899ac1ff Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 24 Oct 2024 12:30:07 +0900 Subject: [PATCH 062/128] feat: node side vault support, client side vault solidification --- autonomi-cli/src/access/user_data.rs | 4 +- autonomi/src/client/data.rs | 4 +- autonomi/src/client/vault.rs | 89 ++++++++++++++++++++------- sn_node/src/put_validation.rs | 21 
+++++-- sn_protocol/src/storage/scratchpad.rs | 5 ++ 5 files changed, 91 insertions(+), 32 deletions(-) diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs index ab3c791ce2..799c23c0d7 100644 --- a/autonomi-cli/src/access/user_data.rs +++ b/autonomi-cli/src/access/user_data.rs @@ -22,12 +22,12 @@ use super::{ }; pub fn get_local_user_data() -> Result { - let register_key = get_register_signing_key()?; + let register_sk = get_register_signing_key().map(|k| k.to_hex()).ok(); let registers = get_local_registers()?; let file_archives = get_local_file_archives()?; let user_data = UserData { - register_sk: Some(register_key.to_hex()), + register_sk, registers, file_archives, }; diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 903bd3f1ca..6fda246380 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -33,8 +33,6 @@ pub type ChunkAddr = XorName; pub enum PutError { #[error("Failed to self-encrypt data.")] SelfEncryption(#[from] crate::self_encryption::Error), - #[error("Error getting Vault XorName data.")] - VaultXorName, #[error("A network error occurred.")] Network(#[from] NetworkError), #[error("Error occurred during cost estimation.")] @@ -47,6 +45,8 @@ pub enum PutError { Wallet(#[from] sn_evm::EvmError), #[error("The vault owner key does not match the client's public key")] VaultBadOwner, + #[error("Payment unexpectedly invalid for {0:?}")] + PaymentUnexpectedlyInvalid(NetworkAddress), } /// Errors that can occur during the pay operation. diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index eb6655cc24..8c966e499c 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -11,12 +11,13 @@ pub mod user_data; pub use key::{derive_vault_key, VaultSecretKey}; pub use user_data::UserData; +use xor_name::XorName; use crate::client::data::PutError; use crate::client::Client; use libp2p::kad::{Quorum, Record}; use sn_evm::{Amount, AttoTokens, EvmWallet}; -use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; +use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg, VerificationKind}; use sn_protocol::storage::{ try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress, }; @@ -38,6 +39,8 @@ pub enum VaultError { Protocol(#[from] sn_protocol::Error), #[error("Network: {0}")] Network(#[from] NetworkError), + #[error("Vault not found")] + Missing, } /// The content type of the vault data @@ -88,16 +91,54 @@ impl Client { is_register: false, }; - let record = self + let pad = match self .network - .get_record_from_network(scratch_key, &get_cfg) + .get_record_from_network(scratch_key.clone(), &get_cfg) .await - .inspect_err(|err| { - debug!("Failed to fetch vault {network_address:?} from network: {err}"); - })?; - - let pad = try_deserialize_record::(&record) - .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; + { + Ok(record) => { + debug!("Got scratchpad for {scratch_key:?}"); + try_deserialize_record::(&record) + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))? 
+ } + Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { + debug!("Got multiple scratchpads for {scratch_key:?}"); + let mut pads = result_map + .values() + .map(|(record, _)| try_deserialize_record::(&record)) + .collect::, _>>() + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; + + // take the latest versions + pads.sort_by_key(|s| s.count()); + let max_version = pads.last().map(|p| p.count()).unwrap_or_else(|| { + error!("Got empty scratchpad vector for {scratch_key:?}"); + u64::MAX + }); + let latest_pads: Vec<_> = pads + .into_iter() + .filter(|s| s.count() == max_version) + .collect(); + + // make sure we only have one of latest version + let pad = match &latest_pads[..] { + [one] => one, + [multi, ..] => { + error!("Got multiple conflicting scratchpads for {scratch_key:?} with the latest version, returning the first one"); + multi + } + [] => { + error!("Got empty scratchpad vector for {scratch_key:?}"); + return Err(VaultError::Missing); + } + }; + pad.to_owned() + } + Err(e) => { + warn!("Failed to fetch vault {network_address:?} from network: {e}"); + return Err(e)?; + } + }; Ok(pad) } @@ -160,7 +201,7 @@ impl Client { Scratchpad::new(client_pk, content_type) }; - let _next_count = scratch.update_and_sign(data, secret_key); + let _ = scratch.update_and_sign(data, secret_key); debug_assert!(scratch.is_valid(), "Must be valid after being signed. This is a bug, please report it by opening an issue on our github"); let scratch_address = scratch.network_address(); @@ -169,19 +210,21 @@ impl Client { info!("Writing to vault at {scratch_address:?}",); let record = if is_new { - self.pay( - [&scratch_address].iter().filter_map(|f| f.as_xorname()), - wallet, - ) - .await - .inspect_err(|err| { - error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}"); - })?; - - let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; - let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; - // Should always be there, else it would have failed on the payment step. 
- let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + let scratch_xor = [&scratch_address] + .iter() + .filter_map(|f| f.as_xorname()) + .collect::>(); + let (payment_proofs, _) = self + .pay(scratch_xor.iter().cloned(), wallet) + .await + .inspect_err(|err| { + error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}"); + })?; + + let proof = match payment_proofs.values().next() { + Some(proof) => proof, + None => return Err(PutError::PaymentUnexpectedlyInvalid(scratch_address)), + }; total_cost = proof.quote.cost; Record { diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 7ff575634d..5a5dac140b 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -137,10 +137,22 @@ impl Node { store_scratchpad_result } RecordKind::Scratchpad => { - error!("Scratchpad should not be validated at this point"); - Err(Error::InvalidPutWithoutPayment( - PrettyPrintRecordKey::from(&record.key).into_owned(), - )) + // make sure we already have this scratchpad locally, else reject it as first time upload needs payment + let key = record.key.clone(); + let scratchpad = try_deserialize_record::(&record)?; + let net_addr = NetworkAddress::ScratchpadAddress(*scratchpad.address()); + let pretty_key = PrettyPrintRecordKey::from(&key); + trace!("Got record to store without payment for scratchpad at {pretty_key:?}"); + if !self.validate_key_and_existence(&net_addr, &key).await? { + warn!("Ignore store without payment for scratchpad at {pretty_key:?}"); + return Err(Error::InvalidPutWithoutPayment( + PrettyPrintRecordKey::from(&record.key).into_owned(), + )); + } + + // store the scratchpad + self.validate_and_store_scratchpad_record(scratchpad, key, false) + .await } RecordKind::Spend => { let record_key = record.key.clone(); @@ -387,7 +399,6 @@ impl Node { /// Check Counter: It MUST ensure that the new counter value is strictly greater than the currently stored value to prevent replay attacks. /// Verify Signature: It MUST use the public key to verify the BLS12-381 signature against the content hash and the counter. /// Accept or Reject: If all verifications succeed, the node MUST accept the packet and replace any previous version. Otherwise, it MUST reject the update. 
- pub(crate) async fn validate_and_store_scratchpad_record( &self, scratchpad: Scratchpad, diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index 94b5c633a5..36a42f1c42 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -52,6 +52,11 @@ impl Scratchpad { self.counter } + /// Set counter manually + pub fn set_count(&mut self, c: u64) { + self.counter = c; + } + /// Return the current data encoding pub fn data_encoding(&self) -> u64 { self.data_encoding From 8b3579b3f7842db233ef994bef3ff45fc81ecafb Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 24 Oct 2024 12:44:58 +0900 Subject: [PATCH 063/128] chore: small cleanups --- autonomi/src/client/vault.rs | 2 +- sn_protocol/src/storage/scratchpad.rs | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 8c966e499c..55103b0578 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -105,7 +105,7 @@ impl Client { debug!("Got multiple scratchpads for {scratch_key:?}"); let mut pads = result_map .values() - .map(|(record, _)| try_deserialize_record::(&record)) + .map(|(record, _)| try_deserialize_record::(record)) .collect::, _>>() .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs index 36a42f1c42..94b5c633a5 100644 --- a/sn_protocol/src/storage/scratchpad.rs +++ b/sn_protocol/src/storage/scratchpad.rs @@ -52,11 +52,6 @@ impl Scratchpad { self.counter } - /// Set counter manually - pub fn set_count(&mut self, c: u64) { - self.counter = c; - } - /// Return the current data encoding pub fn data_encoding(&self) -> u64 { self.data_encoding From 022b08a76b66b986e926e2bfd029dfda26d1d1e0 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 23 Oct 2024 10:02:04 +0900 Subject: [PATCH 064/128] feat(networking): decrease distance of set range from get_closest calls --- sn_networking/src/driver.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 27cc340e8e..30b57f9239 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -922,7 +922,7 @@ impl SwarmDriver { let farthest_peer_to_check = self .get_all_local_peers_excluding_self() .len() - .checked_div(3 * CLOSE_GROUP_SIZE) + .checked_div(5 * CLOSE_GROUP_SIZE) .unwrap_or(1); info!("Farthest peer we'll check: {:?}", farthest_peer_to_check); From bc5d98775f172699d20b614c48d1b72d8069470d Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 22 Oct 2024 10:39:53 +0900 Subject: [PATCH 065/128] chore(networking): refactor handle_get_record_finished Clarifies functions, aims to reduce some clones --- sn_networking/src/event/kad.rs | 229 ++++++++++++++++++--------------- 1 file changed, 125 insertions(+), 104 deletions(-) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 8e903a00ec..952c6def55 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -493,128 +493,149 @@ impl SwarmDriver { /// SplitRecord if there are multiple content hash versions. 
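    /// (As handled below: more than one version yields SplitRecord, zero yields RecordNotFound, and exactly one returns the record when the searched range was thorough, otherwise NotEnoughCopiesInRange plus a re-PUT over the range.)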
fn handle_get_record_finished(&mut self, query_id: QueryId, step: ProgressStep) -> Result<()> { // return error if the entry cannot be found - if let Some((_key, senders, result_map, cfg)) = self.pending_get_record.remove(&query_id) { + if let Some((r_key, senders, result_map, cfg)) = self.pending_get_record.remove(&query_id) { let num_of_versions = result_map.len(); - let (result, log_string) = if let Some((record, from_peers)) = - result_map.values().next() - { - let data_key_address = NetworkAddress::from_record_key(&record.key); - let expected_get_range = self.get_request_range(); - - let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum( - expected_get_range, - from_peers, - &data_key_address, - &cfg.get_quorum, - ); + let data_key_address = NetworkAddress::from_record_key(&r_key); + let expected_get_range = self.get_request_range(); + let all_seen_peers: HashSet<_> = result_map + .values() + .flat_map(|(_, peers)| peers) + .cloned() + .collect(); + let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum( + expected_get_range, + &all_seen_peers, + &data_key_address, + &cfg.get_quorum, + ); + + // we have a split record, return it + if num_of_versions > 1 { + warn!("RANGE: Multiple versions found over range"); + for sender in senders { + sender + .send(Err(GetRecordError::SplitRecord { + result_map: result_map.clone(), + })) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } - let pretty_key = PrettyPrintRecordKey::from(&record.key); - info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}"); + for (record, _peers) in result_map.values() { + self.reput_data_to_range(&record, &data_key_address, &all_seen_peers)?; + } - let result = if num_of_versions > 1 { - warn!("RANGE: more than one version found!"); - Err(GetRecordError::SplitRecord { - result_map: result_map.clone(), - }) - } else if we_have_searched_thoroughly { - warn!("RANGE: Get record finished: {pretty_key:?} Enough of the network has responded or it's not sensitive data... and we only have one copy..."); + return Ok(()); + } - Ok(record.clone()) - } else { - // We have not searched enough of the network range. - let result = Err(GetRecordError::NotEnoughCopiesInRange { - record: record.clone(), - expected: get_quorum_value(&cfg.get_quorum), - got: from_peers.len(), - range: expected_get_range.ilog2().unwrap_or(0), - }); + // we have no results, bail + if num_of_versions == 0 { + warn!("RANGE: No versions found!"); + for sender in senders { + sender + .send(Err(GetRecordError::RecordNotFound)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } + return Ok(()); + } - // This should be a backstop... Quorum::All is the only one that enforces - // a full search of the network range. - if matches!(cfg.get_quorum, Quorum::All) { - warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to extend the range and PUT the data. 
{result:?}"); + // if we have searched thoroughly, we can return the record + if num_of_versions == 1 { + let result = if let Some((record, peers)) = result_map.values().next() { + warn!("RANGE: one version found!"); + + if we_have_searched_thoroughly { + Ok(record.clone()) + } else { + self.reput_data_to_range(record, &data_key_address, &all_seen_peers)?; + Err(GetRecordError::NotEnoughCopiesInRange { + record: record.clone(), + expected: get_quorum_value(&cfg.get_quorum), + got: peers.len(), + range: expected_get_range.ilog2().unwrap_or(0), + }) + } + } else { + debug!("Getting record task {query_id:?} completed with step count {:?}, but no copy found.", step.count); + Err(GetRecordError::RecordNotFound) + }; + for sender in senders { + sender + .send(result.clone()) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } - warn!("Reputting data to network {pretty_key:?}..."); + #[cfg(feature = "open-metrics")] + if self.metrics_recorder.is_some() { + self.check_for_change_in_our_close_group(); + } + } + } else { + debug!("Can't locate query task {query_id:?} during GetRecord finished. We might have already returned the result to the sender."); + } + Ok(()) + } - // let's ensure we have an updated network view - self.trigger_network_discovery(); + /// Repost data to the network if we didn't get enough responses. + fn reput_data_to_range( + &mut self, + record: &Record, + data_key_address: &NetworkAddress, + // all peers who responded with any version of the record + from_peers: &HashSet, + ) -> Result<()> { + let pretty_key = PrettyPrintRecordKey::from(&record.key); + // This should be a backstop... Quorum::All is the only one that enforces + // a full search of the network range. + info!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has the record, or same state, we need to extend the range and PUT the data."); - warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); + info!("Reputting data to network {pretty_key:?}..."); - let record_type = get_type_from_record(record)?; + // let's ensure we have an updated network view + self.trigger_network_discovery(); - let replicate_targets: HashSet<_> = self - .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address) - .iter() - .cloned() - .collect(); + warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); - if from_peers == &replicate_targets { - warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!"); - } + let record_type = get_type_from_record(&record)?; - // set holder to someone that has the data - let holder = NetworkAddress::from_peer( - from_peers - .iter() - .next() - .cloned() - .unwrap_or(self.self_peer_id), - ); + let replicate_targets: HashSet<_> = self + .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address) + .iter() + .cloned() + .collect(); - for peer in replicate_targets { - warn!("Reputting data to {peer:?} for {pretty_key:?} if needed..."); - // Do not send to any peer that has already informed us - if from_peers.contains(&peer) { - continue; - } - - debug!("RANGE: (insufficient, so ) Sending data to unresponded peer: {peer:?} for {pretty_key:?}"); - - // nodes will try/fail to trplicate it from us, but grab from the network thereafter - self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { - req: Request::Cmd(Cmd::Replicate { - holder: holder.clone(), - keys: 
vec![(data_key_address.clone(), record_type.clone())], - }), - peer, - sender: None, - }); - } - } + if from_peers == &replicate_targets { + warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!"); + } - result - }; + // set holder to someone that has the data + let holder = NetworkAddress::from_peer( + from_peers + .iter() + .next() + .cloned() + .unwrap_or(self.self_peer_id), + ); - ( - result, - format!("Getting record {:?} completed with only {:?} copies received, and {num_of_versions} versions.", - PrettyPrintRecordKey::from(&record.key), usize::from(step.count) - 1) - ) - } else { - ( - Err(GetRecordError::RecordNotFound), - format!("Getting record task {query_id:?} completed with step count {:?}, but no copy found.", step.count), - ) - }; - - if cfg.expected_holders.is_empty() { - debug!("{log_string}"); - } else { - debug!( - "{log_string}, and {:?} expected holders not responded", - cfg.expected_holders - ); + for peer in replicate_targets { + warn!("Reputting data to {peer:?} for {pretty_key:?} if needed..."); + // Do not send to any peer that has already informed us + if from_peers.contains(&peer) { + continue; } - for sender in senders { - sender - .send(result.clone()) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } - } else { - debug!("Can't locate query task {query_id:?} during GetRecord finished. We might have already returned the result to the sender."); + debug!("RANGE: (insufficient, so ) Sending data to unresponded peer: {peer:?} for {pretty_key:?}"); + + // nodes will try/fail to trplicate it from us, but grab from the network thereafter + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { + req: Request::Cmd(Cmd::Replicate { + holder: holder.clone(), + keys: vec![(data_key_address.clone(), record_type.clone())], + }), + peer, + sender: None, + }); } + Ok(()) } From 8bbf7d062b343bc412ad690cecf6fc1424f08e3c Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 24 Oct 2024 12:58:39 +0900 Subject: [PATCH 066/128] fix: wasm --- autonomi-cli/Cargo.toml | 1 + autonomi/src/client/wasm.rs | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 20c26e43d6..64587ff2b3 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -27,6 +27,7 @@ harness = false autonomi = { path = "../autonomi", version = "0.2.0", features = [ "data", "fs", + "vault", "registers", "loud", ] } diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index bbbb3461db..737807f5ad 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -2,7 +2,9 @@ use libp2p::Multiaddr; use wasm_bindgen::prelude::*; use super::address::{addr_to_str, str_to_addr}; -use super::vault_user_data::UserData; + +#[cfg(feature = "vault")] +use super::vault::UserData; #[wasm_bindgen(js_name = Client)] pub struct JsClient(super::Client); From deae93ea21af14569cd3c3d107acddad7b53c946 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 23 Oct 2024 15:07:38 +0900 Subject: [PATCH 067/128] chore: only do chunk validation some of the time --- sn_networking/src/event/request_response.rs | 92 +++++++++++---------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index ca6808ed1b..2bfee3a28a 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -241,56 +241,60 @@ impl SwarmDriver { } let event_sender = 
self.event_sender.clone();
-        let _handle = tokio::spawn(async move {
-            let keys_to_verify =
-                Self::select_verification_data_candidates(&peers, &all_keys, &sender);
+        if OsRng.gen_bool(0.01) {
+            let _handle = tokio::spawn(async move {
+                // Only run 10% of the time
+                let keys_to_verify =
+                    Self::select_verification_data_candidates(&peers, &all_keys, &sender);
 
-            if keys_to_verify.is_empty() {
-                debug!("No valid candidate to be checked against peer {holder:?}");
-            } else if let Err(error) = event_sender
-                .send(NetworkEvent::ChunkProofVerification {
-                    peer_id: holder,
-                    keys_to_verify,
-                })
-                .await
-            {
-                error!("SwarmDriver failed to send event: {}", error);
-            }
+                if keys_to_verify.is_empty() {
+                    debug!("No valid candidate to be checked against peer {holder:?}");
+                } else if let Err(error) = event_sender
+                    .send(NetworkEvent::ChunkProofVerification {
+                        peer_id: holder,
+                        keys_to_verify,
+                    })
+                    .await
+                {
+                    error!("SwarmDriver failed to send event: {}", error);
+                }
 
-            // In addition to verifying the sender, we also verify a random close node.
-            // This is to avoid a malicious node escaping the check by never sending a replication_list.
-            // With further reduced probability of 1% (5% * 20%)
-            let close_group_peers = sort_peers_by_address_and_limit(
-                &peers,
-                &NetworkAddress::from_peer(our_peer_id),
-                CLOSE_GROUP_SIZE,
-            )
-            .unwrap_or_default();
+                // In addition to verifying the sender, we also verify a random close node.
+                // This is to avoid a malicious node escaping the check by never sending a replication_list.
+                // With further reduced probability of 1% (5% * 20%)
+                let close_group_peers = sort_peers_by_address_and_limit(
+                    &peers,
+                    &NetworkAddress::from_peer(our_peer_id),
+                    CLOSE_GROUP_SIZE,
+                )
+                .unwrap_or_default();
 
-            loop {
-                let index: usize = OsRng.gen_range(0..close_group_peers.len());
-                let candidate_peer_id = *close_group_peers[index];
-                let candidate = NetworkAddress::from_peer(*close_group_peers[index]);
-                if sender != candidate {
-                    let keys_to_verify =
-                        Self::select_verification_data_candidates(&peers, &all_keys, &candidate);
+                loop {
+                    let index: usize = OsRng.gen_range(0..close_group_peers.len());
+                    let candidate_peer_id = *close_group_peers[index];
+                    let candidate = NetworkAddress::from_peer(*close_group_peers[index]);
+                    if sender != candidate {
+                        let keys_to_verify = Self::select_verification_data_candidates(
+                            &peers, &all_keys, &candidate,
+                        );
 
-                    if keys_to_verify.is_empty() {
-                        debug!("No valid candidate to be checked against peer {candidate:?}");
-                    } else if let Err(error) = event_sender
-                        .send(NetworkEvent::ChunkProofVerification {
-                            peer_id: candidate_peer_id,
-                            keys_to_verify,
-                        })
-                        .await
-                    {
-                        error!("SwarmDriver failed to send event: {}", error);
-                    }
+                        if keys_to_verify.is_empty() {
+                            debug!("No valid candidate to be checked against peer {candidate:?}");
+                        } else if let Err(error) = event_sender
+                            .send(NetworkEvent::ChunkProofVerification {
+                                peer_id: candidate_peer_id,
+                                keys_to_verify,
+                            })
+                            .await
+                        {
+                            error!("SwarmDriver failed to send event: {}", error);
+                        }
 
-                    break;
+                        break;
+                    }
                 }
-            }
-        });
+            });
+        }
     }
 
     /// Check among all chunk type records that we have, select those close to the peer,

From a19763c70ae3923ccabb0685693845daf062397c Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Thu, 24 Oct 2024 09:45:09 +0900
Subject: [PATCH 068/128] chore(networking): choose a smaller range

---
 sn_networking/src/driver.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index 30b57f9239..af80223a84 100644
--- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -957,7 +957,7 @@ impl SwarmDriver { sorted_distances.sort_unstable(); - let median_index = sorted_distances.len() / 2; + let median_index = sorted_distances.len() / 8; let default = KBucketDistance::default(); let median = sorted_distances.get(median_index).cloned(); From 526b19a918e88c2b3dfd17a492264da35db1b893 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 24 Oct 2024 12:46:17 +0900 Subject: [PATCH 069/128] chore(networking): passive discovery not on forced repl --- sn_networking/src/event/kad.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 952c6def55..b6317cbd33 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -590,9 +590,6 @@ impl SwarmDriver { info!("Reputting data to network {pretty_key:?}..."); - // let's ensure we have an updated network view - self.trigger_network_discovery(); - warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); From a9b5323c4b02490b7bcc7cf00684975333a2ee90 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 24 Oct 2024 14:29:40 +0900 Subject: [PATCH 070/128] chore(networking): only validate chunks from periodic replication calls --- sn_networking/src/event/kad.rs | 6 +++--- sn_networking/src/event/request_response.rs | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index b6317cbd33..776d868e0d 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -521,7 +521,7 @@ impl SwarmDriver { } for (record, _peers) in result_map.values() { - self.reput_data_to_range(&record, &data_key_address, &all_seen_peers)?; + self.reput_data_to_range(record, &data_key_address, &all_seen_peers)?; } return Ok(()); @@ -592,10 +592,10 @@ impl SwarmDriver { warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); - let record_type = get_type_from_record(&record)?; + let record_type = get_type_from_record(record)?; let replicate_targets: HashSet<_> = self - .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address) + .get_filtered_peers_exceeding_range_or_closest_nodes(data_key_address) .iter() .cloned() .collect(); diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 2bfee3a28a..b0c9344724 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -196,6 +196,7 @@ impl SwarmDriver { ) { let peers = self.get_all_local_peers_excluding_self(); let our_peer_id = self.self_peer_id; + let more_than_one_key = incoming_keys.len() > 1; let holder = if let Some(peer_id) = sender.as_peer_id() { peer_id @@ -241,7 +242,7 @@ } let event_sender = self.event_sender.clone(); - if OsRng.gen_bool(0.01) { + if more_than_one_key && OsRng.gen_bool(0.1) { let _handle = tokio::spawn(async move { // Only run 10% of the time From 1112d6d58aae9525dabb85738664152232336170 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 24 Oct 2024 14:33:10 +0900 Subject: [PATCH 071/128] fix: do not retry for non-bad peer connections This appears to leave us in a loop; higher levels can attempt to dial again --- sn_networking/src/event/swarm.rs | 15
+++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 2416b5681c..90a3939f47 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, - relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, + event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, + target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; #[cfg(feature = "local")] use libp2p::mdns; @@ -25,7 +25,7 @@ use libp2p::{ }; use sn_protocol::version::{IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR}; use std::collections::HashSet; -use tokio::{sync::oneshot, time::Duration}; +use tokio::time::Duration; impl SwarmDriver { /// Handle `SwarmEvents` @@ -514,15 +514,6 @@ impl SwarmDriver { self.update_on_peer_removal(*dead_peer.node.key.preimage()); } } - - if !should_clean_peer { - // lets try and redial. - for addr in failed_peer_addresses { - let (sender, _recv) = oneshot::channel(); - - self.queue_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); - } - } } SwarmEvent::IncomingConnectionError { connection_id, From c612b04e65bd554b216eb803c17b0b4d157595b7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 24 Oct 2024 11:01:17 +0200 Subject: [PATCH 072/128] feat: add `getEvmNetwork` function to wasm client --- autonomi/src/client/wasm.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index 737807f5ad..0d395c0d4f 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -301,6 +301,14 @@ pub fn gen_secret_key() -> SecretKeyJs { SecretKeyJs(secret_key) } +/// Get the current `EvmNetwork` that was set via environment variables during the build process of this library. +#[wasm_bindgen(js_name = getEvmNetwork)] +pub fn evm_network() -> Result<JsValue, JsError> { + let evm_network = evmlib::utils::get_evm_network_from_env()?; + let js_value = serde_wasm_bindgen::to_value(&evm_network)?; + Ok(js_value) +} + #[wasm_bindgen(js_name = Wallet)] pub struct JsWallet(evmlib::wallet::Wallet); From 538985dabd379a8923704d06e1f33f0640ef0b45 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 24 Oct 2024 17:39:55 +0800 Subject: [PATCH 073/128] chore(CI): reduce peer_removed expectation due to code change --- .github/workflows/merge.yml | 40 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index da6914f65b..fa94260975 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -627,6 +627,10 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 30 + # Sleep for a while to allow restarted nodes to be detected by others + - name: Sleep a while + run: sleep 300 + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@main @@ -653,6 +657,10 @@ jobs: rg "(\d+) matches" | rg "\d+" -o) echo "Restarted $restart_count nodes" + # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`. + # Otherwise kad will remove a `dropped out node` directly from RT.
+ # So, explicit detection of the removal will now happen much less often, + # due to the removal of connection_issue tracking. - name: Get peers removed from nodes using rg shell: bash timeout-minutes: 1 @@ -665,24 +673,6 @@ jobs: fi echo "PeerRemovedFromRoutingTable $peer_removed times" - - name: Verify peers removed exceed restarted node counts - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" - peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then # echo "Restart count of: $restart_count is less than the node count of: $node_count" @@ -795,6 +785,10 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 + # Sleep for a while to allow restarted nodes to be detected by others + - name: Sleep a while + run: sleep 300 + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@main @@ -808,7 +802,11 @@ jobs: timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size + # + # `PeerRemovedFromRoutingTable` now only happens when a peer is reported as `BadNode`. + # Otherwise kad will remove a `dropped out node` directly from RT. + # So, explicit detection of the removal will now happen much less often, + # due to the removal of connection_issue tracking.
run: | restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) @@ -816,8 +814,8 @@ jobs: peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" exit 1 fi node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) From 2916bea340ae66059f17c4161b4ba9957884d988 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Thu, 24 Oct 2024 16:42:04 +0530 Subject: [PATCH 074/128] fix(ci): make the nightly tests work with evm --- .github/workflows/merge.yml | 4 +- .github/workflows/nightly.yml | 494 ++++++++++++++-------------------- 2 files changed, 208 insertions(+), 290 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index da6914f65b..02792a3069 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -186,7 +186,7 @@ jobs: echo "EVM_NETWORK has been set to $EVM_NETWORK" fi - # only these unit tests require a network, the rest are run above + # only these unit tests require a network, the rest are run above in unit test section - name: Run autonomi --tests run: cargo test --package autonomi --tests -- --nocapture env: @@ -1125,7 +1125,7 @@ jobs: run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources > ./download_output 2>&1 env: SN_LOG: "v" - timeout-minutes: 2 + timeout-minutes: 5 - name: showing the download terminal output run: | diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index aac0ac9ad4..843507abff 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -15,7 +15,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] + include: + - os: ubuntu-latest + safe_path: /home/runner/.local/share/safe + - os: windows-latest + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + safe_path: /Users/runner/Library/Application\ Support/safe steps: - uses: actions/checkout@v4 @@ -26,77 +32,181 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --bin safenode --bin safe --bin faucet + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - name: Start a local network uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - - name: Check contact peer + - name: Check if SAFE_PEERS and EVM_NETWORK are set shell: bash - run: echo "Peer is $SAFE_PEERS" + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi # only these unit tests require a network, the rest are run above in unit test section - - name: Run sn_client --tests - run: cargo test --package sn_client --release --tests + - name: Run autonomi --tests + run: cargo test --package autonomi --tests -- 
--nocapture env: - SN_LOG: "all" + SN_LOG: "v" # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 15 - - name: Create and fund a wallet to pay for files storage + + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + if: matrix.os != 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Set secret key for Windows + if: matrix.os == 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: Get file cost + run: ./target/release/autonomi --log-output-dest=data-dir file cost "./resources" + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: File upload + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: parse address (unix) + if: matrix.os != 'windows-latest' run: | - cargo run --bin faucet --release -- --log-output-dest=data-dir send 1000000 $(cargo run --bin safe --release -- --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - cargo run --bin safe --release -- --log-output-dest=data-dir wallet receive --file transfer_hex + UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse address (win) + if: matrix.os == 'windows-latest' + run: | + $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: File Download + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 5 + + - name: Generate register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key - - name: Start a client to carry out chunk actions - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + - name: Create register (writeable by owner) + run: ./target/release/autonomi --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 10 + + - name: parse register address (unix) + if: matrix.os != 'windows-latest' + run: | + REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse register address (win) + if: matrix.os == 'windows-latest' + run: | + $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - # Client FoldersApi tests against local network - - name: Client FoldersApi tests against local network - run: cargo test --release --package sn_client --test folders_api + - name: Get register + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ 
env.REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" + timeout-minutes: 5 + + - name: Edit register + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 + env: + SN_LOG: "v" timeout-minutes: 10 - # CLI Acc-Packet files and folders tests against local network - - name: CLI Acc-Packet files and folders tests - run: cargo test --release -p sn_cli test_acc_packet -- --nocapture + - name: Get register (after edit) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" + timeout-minutes: 5 + + - name: Create Public Register (writeable by anyone) + run: ./target/release/autonomi --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 + + - name: parse public register address (unix) + if: matrix.os != 'windows-latest' + run: | + PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output) + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse public register address (win) + if: matrix.os == 'windows-latest' + run: | + $PUBLIC_REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh + + - name: Get Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + env: + SN_LOG: "v" + timeout-minutes: 5 + + - name: Edit Public Register (current key is the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 + env: + SN_LOG: "v" timeout-minutes: 10 - - name: Start a client to create a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao + - name: Delete current register signing key + shell: bash + run: rm -rf ${{ matrix.safe_path }}/autonomi + + - name: Generate new register signing key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key + + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - - name: Start a client to get a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao + - name: Edit Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 env: - SN_LOG: "all" - timeout-minutes: 2 + SN_LOG: "v" + timeout-minutes: 10 - - name: Start a client to edit a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: - SN_LOG: "all" + SN_LOG: "v" timeout-minutes: 2 - name: Stop the local network and upload logs @@ -134,31 +244,17 @@ jobs: run: cargo test --release --lib --bins --no-run timeout-minutes: 30 - - name: Run CLI tests - timeout-minutes: 25 - run: cargo test --release --package sn_cli -- --skip test_acc_packet_ - - - 
name: Run client tests - timeout-minutes: 25 - # we do not run the `--tests` here are they are run in the e2e job - # as they rquire a network - run: | - cargo test --release --package sn_client --doc - cargo test --release --package sn_client --lib - cargo test --release --package sn_client --bins - cargo test --release --package sn_client --examples - - name: Run node tests timeout-minutes: 25 run: cargo test --release --package sn_node --lib - name: Run network tests timeout-minutes: 25 - run: cargo test --release -p sn_networking --features="open-metrics" + run: cargo test --release --package sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 - run: cargo test --release -p sn_protocol + run: cargo test --release --package sn_protocol - name: Run transfers tests timeout-minutes: 25 @@ -167,13 +263,12 @@ jobs: - name: Run logging tests timeout-minutes: 25 run: cargo test --release --package sn_logging - + - name: Run register tests - shell: bash timeout-minutes: 50 + run: cargo test --release --package sn_registers env: PROPTEST_CASES: 512 - run: cargo test --release -p sn_registers - name: post notification to slack on failure if: ${{ failure() }} @@ -183,210 +278,6 @@ jobs: SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" SLACK_TITLE: "Nightly Unit Test Run Failed" - spend_test: - name: spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build binaries - run: cargo build --release --features=local --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: execute the sequential transfers test - run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - SN_LOG: "all" - timeout-minutes: 10 - - - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - SN_LOG: "all" - timeout-minutes: 10 - - - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 25 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend - platform: ${{ matrix.os }} - - - name: post notification to slack on failure - if: ${{ failure() }} - uses: bryannice/gitactions-slack-notification@2.0.0 - env: - SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - SLACK_TITLE: "Nightly Spend Test Run Failed" - - # runs with increased node count - spend_simulation: - name: spend simulation - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build binaries - run: cargo build --release --features=local --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-count: 50 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: execute the spend simulation test - run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend_simulation - platform: ${{ matrix.os }} - - - name: post notification to slack on failure - if: ${{ failure() }} - uses: bryannice/gitactions-slack-notification@2.0.0 - env: - SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - SLACK_TITLE: "Nightly Spend Test Run Failed" - - token_distribution_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: token distribution test - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --features=local,distribution --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release --features=local,distribution --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: execute token_distribution tests - run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_token_distribution - platform: ${{ matrix.os }} - churn: name: Network churning tests runs-on: ${{ matrix.os }} @@ -412,7 +303,7 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build churn tests @@ -427,14 +318,13 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - name: Chunks data integrity during nodes churn (during 10min) (in theory) - run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture + run: cargo test --release -p sn_node --features=local --test data_with_churn -- --nocapture env: TEST_DURATION_MINS: 60 TEST_CHURN_CYCLES: 6 @@ -442,7 +332,46 @@ jobs: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - - name: Verify restart of nodes using rg + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_churn + platform: ${{ matrix.os }} + + + - name: Get total node count + shell: bash + timeout-minutes: 1 + run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" + + - name: Get restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restarted $restart_count nodes" + + - name: Get peers removed from nodes using rg + shell: bash + timeout-minutes: 1 + run: | + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; } + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" + exit 1 + fi + echo "PeerRemovedFromRoutingTable $peer_removed times" + + - name: Verify peers removed exceed restarted node counts shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -459,8 +388,6 @@ jobs: echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" exit 1 fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then @@ -484,14 +411,6 @@ jobs: exit 1 fi - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_churn - platform: ${{ matrix.os }} - - name: post notification to slack on failure if: ${{ failure() }} uses: bryannice/gitactions-slack-notification@2.0.0 @@ -537,7 +456,7 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build data location and routing table tests @@ -552,31 +471,38 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ${{ matrix.os }} build: true - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_data_location -- --nocapture env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features=local --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: safe_test_logs_data_location + platform: ${{ matrix.os }} + - name: Verify restart of nodes using rg shell: bash timeout-minutes: 1 @@ -597,14 +523,6 @@ jobs: node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) echo "Node dir count is $node_count" - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_data_location - platform: ${{ matrix.os }} - - name: post notification to slack on failure if: ${{ failure() }} uses: bryannice/gitactions-slack-notification@2.0.0 From 8616c7a2d4d83579dd2dba5f81032c9ac529bb90 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 25 Oct 2024 12:30:15 +0900 Subject: [PATCH 075/128] feat: private data, private archives, vault support and CLI integration --- Cargo.lock | 6 +- autonomi-cli/Cargo.toml | 2 + autonomi-cli/src/access/user_data.rs | 77 +++++++++++++- autonomi-cli/src/actions/download.rs | 78 +++++++++++++- autonomi-cli/src/commands.rs | 7 +- autonomi-cli/src/commands/file.rs | 72 ++++++++++--- autonomi-cli/src/commands/vault.rs | 20 +++- autonomi/src/client/archive.rs | 1 + autonomi/src/client/archive_private.rs | 140 +++++++++++++++++++++++++ autonomi/src/client/data.rs | 44 ++++---- autonomi/src/client/data_private.rs | 138 ++++++++++++++++++++++++ autonomi/src/client/fs.rs | 2 +- autonomi/src/client/fs_private.rs | 101 ++++++++++++++++++ autonomi/src/client/mod.rs | 6 ++ autonomi/src/client/vault/user_data.rs | 3 + evmlib/src/contract/network_token.rs | 2 +- 16 files changed, 649 insertions(+), 50 deletions(-) create mode 100644 autonomi/src/client/archive_private.rs create mode 100644 autonomi/src/client/data_private.rs create mode 100644 autonomi/src/client/fs_private.rs diff --git a/Cargo.lock b/Cargo.lock index ef840ca3a9..d274255dbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1134,6 +1134,8 @@ dependencies = [ "indicatif", "rand 0.8.5", "rayon", + "serde", + "serde_json", "sn_build_info", "sn_logging", "sn_peers_acquisition", @@ -7813,9 +7815,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 775d5f7f86..7e71a4a841 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -49,6 +49,8 @@ sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } sn_build_info = { path = "../sn_build_info", version = "0.1.16" } sn_logging = { path = "../sn_logging", version = "0.2.37" } walkdir = "2.5.0" +serde_json = "1.0.132" +serde = "1.0.210" [dev-dependencies] autonomi = { path = "../autonomi", version = "0.2.1", features = [ diff --git a/autonomi-cli/src/access/user_data.rs b/autonomi-cli/src/access/user_data.rs index 
799c23c0d7..57deb85785 100644 --- a/autonomi-cli/src/access/user_data.rs +++ b/autonomi-cli/src/access/user_data.rs @@ -11,6 +11,7 @@ use std::collections::HashMap; use autonomi::client::{ address::{addr_to_str, str_to_addr}, archive::ArchiveAddr, + archive_private::PrivateArchiveAccess, registers::{RegisterAddress, RegisterSecretKey}, vault::UserData, }; @@ -21,19 +22,62 @@ use super::{ keys::{create_register_signing_key_file, get_register_signing_key}, }; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +struct PrivateFileArchive { + name: String, + secret_access: String, +} + pub fn get_local_user_data() -> Result<UserData> { let register_sk = get_register_signing_key().map(|k| k.to_hex()).ok(); let registers = get_local_registers()?; - let file_archives = get_local_file_archives()?; + let file_archives = get_local_public_file_archives()?; + let private_file_archives = get_local_private_file_archives()?; let user_data = UserData { register_sk, registers, file_archives, + private_file_archives, }; Ok(user_data) } +pub fn get_local_private_file_archives() -> Result<HashMap<PrivateArchiveAccess, String>> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + std::fs::create_dir_all(&private_file_archives_path)?; + + let mut private_file_archives = HashMap::new(); + for entry in walkdir::WalkDir::new(private_file_archives_path) + .min_depth(1) + .max_depth(1) + { + let entry = entry?; + let file_content = std::fs::read_to_string(entry.path())?; + let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?; + let private_file_archive_access = + PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?; + private_file_archives.insert(private_file_archive_access, private_file_archive.name); + } + Ok(private_file_archives) +} + +pub fn get_local_private_archive_access(local_addr: &str) -> Result<PrivateArchiveAccess> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + let file_path = private_file_archives_path.join(local_addr); + let file_content = std::fs::read_to_string(file_path)?; + let private_file_archive: PrivateFileArchive = serde_json::from_str(&file_content)?; + let private_file_archive_access = + PrivateArchiveAccess::from_hex(&private_file_archive.secret_access)?; + Ok(private_file_archive_access) +} + pub fn get_local_registers() -> Result<HashMap<RegisterAddress, String>> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); @@ -55,7 +99,7 @@ pub fn get_local_registers() -> Result<HashMap<RegisterAddress, String>> { Ok(registers) } -pub fn get_local_file_archives() -> Result<HashMap<ArchiveAddr, String>> { +pub fn get_local_public_file_archives() -> Result<HashMap<ArchiveAddr, String>> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); let file_archives_path = user_data_path.join("file_archives"); @@ -86,8 +130,13 @@ pub fn write_local_user_data(user_data: &UserData) -> Result<()> { } for (archive, name) in user_data.file_archives.iter() { - write_local_file_archive(archive, name)?; + write_local_public_file_archive(addr_to_str(*archive), name)?; + } + + for (archive, name) in user_data.private_file_archives.iter() { + write_local_private_file_archive(archive.to_hex(), archive.address(), name)?; } + Ok(()) } @@ -100,11 +149,29 @@ pub fn write_local_register(register: &RegisterAddress, name: &str) -> Result<() Ok(()) } -pub fn write_local_file_archive(archive: &ArchiveAddr,
name: &str) -> Result<()> { +pub fn write_local_public_file_archive(archive: String, name: &str) -> Result<()> { let data_dir = get_client_data_dir_path()?; let user_data_path = data_dir.join("user_data"); let file_archives_path = user_data_path.join("file_archives"); std::fs::create_dir_all(&file_archives_path)?; - std::fs::write(file_archives_path.join(addr_to_str(*archive)), name)?; + std::fs::write(file_archives_path.join(archive), name)?; + Ok(()) +} + +pub fn write_local_private_file_archive( + archive: String, + local_addr: String, + name: &str, +) -> Result<()> { + let data_dir = get_client_data_dir_path()?; + let user_data_path = data_dir.join("user_data"); + let private_file_archives_path = user_data_path.join("private_file_archives"); + std::fs::create_dir_all(&private_file_archives_path)?; + let file_name = local_addr; + let content = serde_json::to_string(&PrivateFileArchive { + name: name.to_string(), + secret_access: archive, + })?; + std::fs::write(private_file_archives_path.join(file_name), content)?; Ok(()) } diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs index 069a37c0eb..7beb3578f1 100644 --- a/autonomi-cli/src/actions/download.rs +++ b/autonomi-cli/src/actions/download.rs @@ -7,12 +7,84 @@ // permissions and limitations relating to use of the SAFE Network Software. use super::get_progress_bar; -use autonomi::{client::address::str_to_addr, Client}; -use color_eyre::eyre::{eyre, Context, Result}; +use autonomi::{ + client::{address::str_to_addr, archive::ArchiveAddr, archive_private::PrivateArchiveAccess}, + Client, +}; +use color_eyre::{ + eyre::{eyre, Context, Result}, + Section, +}; use std::path::PathBuf; pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> { - let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; + let public_address = str_to_addr(addr).ok(); + let private_address = crate::user_data::get_local_private_archive_access(addr) + .inspect_err(|e| error!("Failed to get private archive access: {e}")) + .ok(); + + match (public_address, private_address) { + (Some(public_address), _) => download_public(addr, public_address, dest_path, client).await, + (_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await, + _ => Err(eyre!("Failed to parse data address")) + .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7") + .with_suggestion(|| "Private addresses look like this: 1358645341480028172") + .with_suggestion(|| "Try the `file list` command to get addresses you have access to"), + } +} + +async fn download_private( + addr: &str, + private_address: PrivateArchiveAccess, + dest_path: &str, + client: &mut Client, +) -> Result<()> { + let archive = client + .private_archive_get(private_address) + .await + .wrap_err("Failed to fetch data from address")?; + + let progress_bar = get_progress_bar(archive.iter().count() as u64)?; + let mut all_errs = vec![]; + for (path, access, _meta) in archive.iter() { + progress_bar.println(format!("Fetching file: {path:?}...")); + let bytes = match client.private_data_get(access.clone()).await { + Ok(bytes) => bytes, + Err(e) => { + let err = format!("Failed to fetch file {path:?}: {e}"); + all_errs.push(err); + continue; + } + }; + + let path = PathBuf::from(dest_path).join(path); + let here = PathBuf::from("."); + let parent = path.parent().unwrap_or_else(|| &here); + std::fs::create_dir_all(parent)?; + std::fs::write(path, 
bytes)?; + progress_bar.clone().inc(1); + } + progress_bar.finish_and_clear(); + + if all_errs.is_empty() { + info!("Successfully downloaded private data with local address: {addr}"); + println!("Successfully downloaded private data with local address: {addr}"); + Ok(()) + } else { + let err_no = all_errs.len(); + eprintln!("{err_no} errors while downloading private data with local address: {addr}"); + eprintln!("{all_errs:#?}"); + error!("Errors while downloading private data with local address {addr}: {all_errs:#?}"); + Err(eyre!("Errors while downloading private data")) + } +} + +async fn download_public( + addr: &str, + address: ArchiveAddr, + dest_path: &str, + client: &mut Client, +) -> Result<()> { let archive = client .archive_get(address) .await diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index 06adb34006..c374eca78f 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -44,10 +44,13 @@ pub enum FileCmd { file: String, }, - /// Upload a file and pay for it. + /// Upload a file and pay for it. Data on the Network is private by default. Upload { /// The file to upload. file: String, + /// Upload the file as public. Everyone can see public data on the Network. + #[arg(short, long)] + public: bool, }, /// Download a file from the given address. @@ -149,7 +152,7 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { match cmd { SubCmd::File { command } => match command { FileCmd::Cost { file } => file::cost(&file, peers.await?).await, - FileCmd::Upload { file } => file::upload(&file, peers.await?).await, + FileCmd::Upload { file, public } => file::upload(&file, public, peers.await?).await, FileCmd::Download { addr, dest_file } => { file::download(&addr, &dest_file, peers.await?).await } diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index faf21137e6..e32b98b51d 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -30,44 +30,68 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> { Ok(()) } -pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> { +pub async fn upload(file: &str, public: bool, peers: Vec<Multiaddr>) -> Result<()> { let wallet = crate::keys::load_evm_wallet()?; let mut client = crate::actions::connect_to_network(peers).await?; let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); println!("Uploading data to network..."); - info!("Uploading file: {file}"); + info!( + "Uploading {} file: {file}", + if public { "public" } else { "private" } + ); let dir_path = PathBuf::from(file); let name = dir_path .file_name() .map(|n| n.to_string_lossy().to_string()) .unwrap_or(file.to_string()); - let xor_name = client - .dir_upload(dir_path, &wallet) - .await - .wrap_err("Failed to upload file")?; - let addr = addr_to_str(xor_name); + // upload dir + let local_addr; + let archive = if public { + let xor_name = client + .dir_upload(dir_path, &wallet) + .await + .wrap_err("Failed to upload file")?; + local_addr = addr_to_str(xor_name); + local_addr.clone() + } else { + let private_data_access = client + .private_dir_upload(dir_path, &wallet) + .await + .wrap_err("Failed to upload file")?; + local_addr = private_data_access.address(); + private_data_access.to_hex() + }; + + // wait for upload to complete if let Err(e) = upload_completed_tx.send(()) { error!("Failed to send upload completed event: {e:?}"); eprintln!("Failed to send upload completed event: {e:?}"); } + //
get summary let summary = upload_summary_thread.await?; if summary.record_count == 0 { println!("All chunks already exist on the network."); } else { println!("Successfully uploaded: {file}"); - println!("At address: {addr}"); - info!("Successfully uploaded: {file} at address: {addr}"); + println!("At address: {local_addr}"); + info!("Successfully uploaded: {file} at address: {local_addr}"); println!("Number of chunks uploaded: {}", summary.record_count); println!("Total cost: {} AttoTokens", summary.tokens_spent); } - info!("Summary for upload of file {file} at {addr:?}: {summary:?}"); + info!("Summary for upload of file {file} at {local_addr:?}: {summary:?}"); - crate::user_data::write_local_file_archive(&xor_name, &name) + // save to local user data + let writer = if public { + crate::user_data::write_local_public_file_archive(archive, &name) + } else { + crate::user_data::write_local_private_file_archive(archive, local_addr, &name) + }; + writer .wrap_err("Failed to save file to local user data") .with_suggestion(|| "Local user data saves the file address above to disk, without it you need to keep track of the address yourself")?; info!("Saved file to local user data"); @@ -81,11 +105,33 @@ pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Res } pub fn list() -> Result<()> { + // get public file archives println!("Retrieving local user data..."); - let file_archives = crate::user_data::get_local_file_archives()?; - println!("✅ You have {} file archive(s):", file_archives.len()); + let file_archives = crate::user_data::get_local_public_file_archives() + .wrap_err("Failed to get local public file archives")?; + + println!( + "✅ You have {} public file archive(s):", + file_archives.len() + ); for (addr, name) in file_archives { println!("{}: {}", name, addr_to_str(addr)); } + + // get private file archives + println!(); + let private_file_archives = crate::user_data::get_local_private_file_archives() + .wrap_err("Failed to get local private file archives")?; + + println!( + "✅ You have {} private file archive(s):", + private_file_archives.len() + ); + for (addr, name) in private_file_archives { + println!("{}: {}", name, addr.address()); + } + + println!(); + println!("> Note that private data addresses are not network addresses, they are only used for referring to private data client side."); Ok(()) } diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs index 9888366eec..60c0c8192f 100644 --- a/autonomi-cli/src/commands/vault.rs +++ b/autonomi-cli/src/commands/vault.rs @@ -34,6 +34,7 @@ pub async fn create(peers: Vec<Multiaddr>) -> Result<()> { println!("Retrieving local user data..."); let local_user_data = crate::user_data::get_local_user_data()?; let file_archives_len = local_user_data.file_archives.len(); + let private_file_archives_len = local_user_data.private_file_archives.len(); let registers_len = local_user_data.registers.len(); println!("Pushing to network vault..."); @@ -48,7 +49,10 @@ pub async fn create(peers: Vec<Multiaddr>) -> Result<()> { } println!("Total cost: {total_cost} AttoTokens"); - println!("Vault contains {file_archives_len} file archive(s) and {registers_len} register(s)"); + println!("Vault contains:"); + println!("{file_archives_len} public file archive(s)"); + println!("{private_file_archives_len} private file archive(s)"); + println!("{registers_len} register(s)"); Ok(()) } @@ -74,13 +78,17 @@ pub async fn sync(peers: Vec<Multiaddr>, force: bool) -> Result<()> { println!("Pushing local user data to network vault..."); let local_user_data =
crate::user_data::get_local_user_data()?; let file_archives_len = local_user_data.file_archives.len(); + let private_file_archives_len = local_user_data.private_file_archives.len(); let registers_len = local_user_data.registers.len(); client .put_user_data_to_vault(&vault_sk, &wallet, local_user_data) .await?; println!("✅ Successfully synced vault"); - println!("Vault contains {file_archives_len} file archive(s) and {registers_len} register(s)"); + println!("Vault contains:"); + println!("{file_archives_len} public file archive(s)"); + println!("{private_file_archives_len} private file archive(s)"); + println!("{registers_len} register(s)"); Ok(()) } @@ -93,10 +101,12 @@ pub async fn load(peers: Vec<Multiaddr>) -> Result<()> { println!("Writing user data to disk..."); crate::user_data::write_local_user_data(&user_data)?; + println!("✅ Successfully loaded vault with:"); + println!("{} public file archive(s)", user_data.file_archives.len()); println!( - "✅ Successfully loaded vault with {} file archive(s) and {} register(s)", - user_data.file_archives.len(), - user_data.registers.len() + "{} private file archive(s)", + user_data.private_file_archives.len() ); + println!("{} register(s)", user_data.registers.len()); Ok(()) } diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs index 3957b3d942..04ad120b19 100644 --- a/autonomi/src/client/archive.rs +++ b/autonomi/src/client/archive.rs @@ -35,6 +35,7 @@ pub enum RenameError { /// An archive of files that containing file paths, their metadata and the files data addresses /// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address. +/// Archives are public, meaning anyone can read the data in the archive. For private archives use [`crate::client::archive_private::PrivateArchive`]. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct Archive { map: HashMap<PathBuf, (DataAddr, Metadata)>, diff --git a/autonomi/src/client/archive_private.rs b/autonomi/src/client/archive_private.rs new file mode 100644 index 0000000000..a7ba854380 --- /dev/null +++ b/autonomi/src/client/archive_private.rs @@ -0,0 +1,140 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use sn_networking::target_arch::{Duration, SystemTime, UNIX_EPOCH}; + +use super::{ + archive::{Metadata, RenameError}, + data::{GetError, PutError}, + data_private::PrivateDataAccess, + Client, +}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use sn_evm::EvmWallet; + +/// The address of a private archive +/// Contains the [`PrivateDataAccess`] leading to the [`PrivateArchive`] data +pub type PrivateArchiveAccess = PrivateDataAccess; + +/// A private archive of files containing file paths, their metadata and the files' data maps +/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
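+///
+/// A minimal usage sketch (assuming `data_map` is a `PrivateDataAccess` previously
+/// returned by `Client::private_data_put`):
+///
+/// ```ignore
+/// let mut archive = PrivateArchive::new();
+/// archive.add_new_file(PathBuf::from("foo/bar.txt"), data_map);
+/// let serialized = archive.into_bytes()?;
+/// ```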
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] +pub struct PrivateArchive { + map: HashMap<PathBuf, (PrivateDataAccess, Metadata)>, +} + +impl PrivateArchive { + /// Create a new empty local archive + /// Note that this does not upload the archive to the network + pub fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + /// Rename a file in an archive + /// Note that this does not upload the archive to the network + pub fn rename_file(&mut self, old_path: &Path, new_path: &Path) -> Result<(), RenameError> { + let (data_addr, mut meta) = self + .map + .remove(old_path) + .ok_or(RenameError::FileNotFound(old_path.to_path_buf()))?; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(); + meta.modified = now; + self.map.insert(new_path.to_path_buf(), (data_addr, meta)); + Ok(()) + } + + /// Add a file to a local archive + /// Note that this does not upload the archive to the network + pub fn add_file(&mut self, path: PathBuf, data_map: PrivateDataAccess, meta: Metadata) { + self.map.insert(path, (data_map, meta)); + } + + /// Add a file to a local archive, with default metadata + /// Note that this does not upload the archive to the network + pub fn add_new_file(&mut self, path: PathBuf, data_map: PrivateDataAccess) { + self.map.insert(path, (data_map, Metadata::new())); + } + + /// List all files in the archive + pub fn files(&self) -> Vec<(PathBuf, Metadata)> { + self.map + .iter() + .map(|(path, (_, meta))| (path.clone(), meta.clone())) + .collect() + } + + /// List all data addresses of the files in the archive + pub fn addresses(&self) -> Vec<PrivateDataAccess> { + self.map + .values() + .map(|(data_map, _)| data_map.clone()) + .collect() + } + + /// Iterate over the archive items + /// Returns an iterator over (PathBuf, PrivateDataAccess, Metadata) + pub fn iter(&self) -> impl Iterator<Item = (&PathBuf, &PrivateDataAccess, &Metadata)> { + self.map + .iter() + .map(|(path, (data_map, meta))| (path, data_map, meta)) + } + + /// Get the underlying map + pub fn map(&self) -> &HashMap<PathBuf, (PrivateDataAccess, Metadata)> { + &self.map + } + + /// Deserialize from bytes. + pub fn from_bytes(data: Bytes) -> Result<PrivateArchive, rmp_serde::decode::Error> { + let root: PrivateArchive = rmp_serde::from_slice(&data[..])?; + + Ok(root) + } + + /// Serialize to bytes. + pub fn into_bytes(&self) -> Result<Bytes, rmp_serde::encode::Error> { + let root_serialized = rmp_serde::to_vec(&self)?; + let root_serialized = Bytes::from(root_serialized); + + Ok(root_serialized) + } +} + +impl Client { + /// Fetch a private archive from the network + pub async fn private_archive_get( + &self, + addr: PrivateArchiveAccess, + ) -> Result<PrivateArchive, GetError> { + let data = self.private_data_get(addr).await?; + Ok(PrivateArchive::from_bytes(data)?)
+ } + + /// Upload a private archive to the network + pub async fn private_archive_put( + &self, + archive: PrivateArchive, + wallet: &EvmWallet, + ) -> Result<PrivateArchiveAccess, PutError> { + let bytes = archive + .into_bytes() + .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?; + self.private_data_put(bytes, wallet).await + } +} diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 6fda246380..869022cd37 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -8,7 +8,7 @@ use bytes::Bytes; use libp2p::kad::Quorum; -use tokio::task::JoinError; +use tokio::task::{JoinError, JoinSet}; use std::collections::HashSet; use xor_name::XorName; @@ -47,6 +47,8 @@ pub enum PutError { VaultBadOwner, #[error("Payment unexpectedly invalid for {0:?}")] PaymentUnexpectedlyInvalid(NetworkAddress), + #[error("Could not simultaneously upload chunks: {0:?}")] + JoinError(tokio::task::JoinError), } /// Errors that can occur during the pay operation. @@ -102,8 +104,9 @@ impl Client { Ok(data) } - /// Upload a piece of data to the network. This data will be self-encrypted. + /// Upload a piece of data to the network. /// Returns the Data Address at which the data was stored. + /// This data is publicly accessible. pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result<DataAddr, PutError> { let now = sn_networking::target_arch::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; @@ -130,26 +133,31 @@ impl Client { let mut record_count = 0; - // Upload data map - if let Some(proof) = payment_proofs.get(&map_xor_name) { - debug!("Uploading data map chunk: {map_xor_name:?}"); - self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) - .await - .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; - record_count += 1; - } - - // Upload the rest of the chunks + // Upload all the chunks in parallel including the data map chunk debug!("Uploading {} chunks", chunks.len()); - for chunk in chunks { + let mut tasks = JoinSet::new(); + for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) { + let self_clone = self.clone(); + let address = *chunk.address(); if let Some(proof) = payment_proofs.get(chunk.name()) { - let address = *chunk.address(); - self.chunk_upload_with_payment(chunk, proof.clone()) - .await - .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; - record_count += 1; + let proof_clone = proof.clone(); + tasks.spawn(async move { + self_clone + .chunk_upload_with_payment(chunk, proof_clone) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) + }); + } else { + debug!("Chunk at {address:?} was already paid for so skipping"); + } } + while let Some(result) = tasks.join_next().await { + result + .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) + .map_err(PutError::JoinError)? + .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + record_count += 1; + } if let Some(channel) = self.client_event_sender.as_ref() { let tokens_spent = payment_proofs diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs new file mode 100644 index 0000000000..b6d0bfa8a3 --- /dev/null +++ b/autonomi/src/client/data_private.rs @@ -0,0 +1,138 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::hash::{DefaultHasher, Hash, Hasher};
+
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use sn_evm::{Amount, EvmWallet};
+use sn_protocol::storage::Chunk;
+use tokio::task::JoinSet;
+
+use super::data::{GetError, PutError};
+use crate::client::{ClientEvent, UploadSummary};
+use crate::{self_encryption::encrypt, Client};
+
+/// Private data on the network can be accessed with this
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct PrivateDataAccess(Chunk);
+
+impl PrivateDataAccess {
+    pub fn to_hex(&self) -> String {
+        hex::encode(self.0.value())
+    }
+
+    pub fn from_hex(hex: &str) -> Result<Self, hex::FromHexError> {
+        let data = hex::decode(hex)?;
+        Ok(Self(Chunk::new(Bytes::from(data))))
+    }
+
+    /// Get a private address for [`PrivateDataAccess`]. Note that this is not a network address, it is only used for referring to private data client side.
+    pub fn address(&self) -> String {
+        hash_to_short_string(&self.to_hex())
+    }
+}
+
+fn hash_to_short_string(input: &str) -> String {
+    let mut hasher = DefaultHasher::new();
+    input.hash(&mut hasher);
+    let hash_value = hasher.finish();
+    hash_value.to_string()
+}
+
+impl Client {
+    /// Fetch a blob of private data from the network
+    pub async fn private_data_get(&self, data_map: PrivateDataAccess) -> Result<Bytes, GetError> {
+        info!(
+            "Fetching private data from Data Map {:?}",
+            data_map.0.address()
+        );
+        let data = self.fetch_from_data_map_chunk(data_map.0.value()).await?;
+
+        Ok(data)
+    }
+
+    /// Upload a piece of private data to the network. This data will be self-encrypted.
+    /// Returns the [`PrivateDataAccess`] containing the map to the encrypted chunks.
+    /// This data is private and only accessible with the [`PrivateDataAccess`].
+    pub async fn private_data_put(
+        &self,
+        data: Bytes,
+        wallet: &EvmWallet,
+    ) -> Result<PrivateDataAccess, PutError> {
+        let now = sn_networking::target_arch::Instant::now();
+        let (data_map_chunk, chunks) = encrypt(data)?;
+        debug!("Encryption took: {:.2?}", now.elapsed());
+
+        // Pay for all chunks
+        let xor_names: Vec<_> = chunks.iter().map(|chunk| *chunk.name()).collect();
+        info!("Paying for {} addresses", xor_names.len());
+        let (payment_proofs, _free_chunks) = self
+            .pay(xor_names.into_iter(), wallet)
+            .await
+            .inspect_err(|err| error!("Error paying for data: {err:?}"))?;
+
+        // Upload the chunks with the payments
+        let mut record_count = 0;
+        debug!("Uploading {} chunks", chunks.len());
+        let mut tasks = JoinSet::new();
+        for chunk in chunks {
+            let self_clone = self.clone();
+            let address = *chunk.address();
+            if let Some(proof) = payment_proofs.get(chunk.name()) {
+                let proof_clone = proof.clone();
+                tasks.spawn(async move {
+                    self_clone
+                        .chunk_upload_with_payment(chunk, proof_clone)
+                        .await
+                        .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))
+                });
+            } else {
+                debug!("Chunk at {address:?} was already paid for so skipping");
+            }
+        }
+        while let Some(result) = tasks.join_next().await {
+            result
+                .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
+                .map_err(PutError::JoinError)?
+                .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
+            record_count += 1;
+        }
+
+        // Reporting
+        if let Some(channel) = self.client_event_sender.as_ref() {
+            let tokens_spent = payment_proofs
+                .values()
+                .map(|proof| proof.quote.cost.as_atto())
+                .sum::<Amount>();
+
+            let summary = UploadSummary {
+                record_count,
+                tokens_spent,
+            };
+            if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await {
+                error!("Failed to send client event: {err:?}");
+            }
+        }
+
+        Ok(PrivateDataAccess(data_map_chunk))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_hex() {
+        let data_map = PrivateDataAccess(Chunk::new(Bytes::from_static(b"hello")));
+        let hex = data_map.to_hex();
+        let data_map2 = PrivateDataAccess::from_hex(&hex).expect("Failed to decode hex");
+        assert_eq!(data_map, data_map2);
+    }
+}
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 51311e2f70..d7f243df68 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -179,7 +179,7 @@ impl Client {
 
 // Get metadata from directory entry. Defaults to `0` for creation and modification times if
 // any error is encountered. Logs errors upon error.
-fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata {
+pub(crate) fn metadata_from_entry(entry: &walkdir::DirEntry) -> Metadata {
     let fs_metadata = match entry.metadata() {
         Ok(metadata) => metadata,
         Err(err) => {
diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs
new file mode 100644
index 0000000000..0d9b819d70
--- /dev/null
+++ b/autonomi/src/client/fs_private.rs
@@ -0,0 +1,93 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
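+// Private counterpart of `fs.rs`: files are uploaded with `private_data_put`, and the
+// archive that indexes them is itself stored as a private blob rather than at a public address.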
+
+use crate::client::Client;
+use bytes::Bytes;
+use sn_evm::EvmWallet;
+use std::path::PathBuf;
+
+use super::archive_private::{PrivateArchive, PrivateArchiveAccess};
+use super::data_private::PrivateDataAccess;
+use super::fs::{DownloadError, UploadError};
+
+impl Client {
+    /// Download a private file from network to local file system
+    pub async fn private_file_download(
+        &self,
+        data_access: PrivateDataAccess,
+        to_dest: PathBuf,
+    ) -> Result<(), DownloadError> {
+        let data = self.private_data_get(data_access).await?;
+        if let Some(parent) = to_dest.parent() {
+            tokio::fs::create_dir_all(parent).await?;
+        }
+        tokio::fs::write(to_dest, data).await?;
+        Ok(())
+    }
+
+    /// Download a private directory from network to local file system
+    pub async fn private_dir_download(
+        &self,
+        archive_access: PrivateArchiveAccess,
+        to_dest: PathBuf,
+    ) -> Result<(), DownloadError> {
+        let archive = self.private_archive_get(archive_access).await?;
+        for (path, addr, _meta) in archive.iter() {
+            self.private_file_download(addr.clone(), to_dest.join(path))
+                .await?;
+        }
+        Ok(())
+    }
+
+    /// Upload a private directory to the network. The directory is recursively walked.
+    /// Reads all files, splits into chunks, uploads chunks, uploads private archive, returns [`PrivateArchiveAccess`] (pointing to the private archive)
+    pub async fn private_dir_upload(
+        &self,
+        dir_path: PathBuf,
+        wallet: &EvmWallet,
+    ) -> Result<PrivateArchiveAccess, UploadError> {
+        let mut archive = PrivateArchive::new();
+
+        for entry in walkdir::WalkDir::new(dir_path) {
+            let entry = entry?;
+
+            if !entry.file_type().is_file() {
+                continue;
+            }
+
+            let path = entry.path().to_path_buf();
+            tracing::info!("Uploading file: {path:?}");
+            #[cfg(feature = "loud")]
+            println!("Uploading file: {path:?}");
+            let file = self.private_file_upload(path.clone(), wallet).await?;
+
+            let metadata = super::fs::metadata_from_entry(&entry);
+
+            archive.add_file(path, file, metadata);
+        }
+
+        let archive_serialized = archive.into_bytes()?;
+
+        let arch_addr = self.private_data_put(archive_serialized, wallet).await?;
+
+        Ok(arch_addr)
+    }
+
+    /// Upload a private file to the network.
+    /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns [`PrivateDataAccess`] (pointing to the datamap)
+    async fn private_file_upload(
+        &self,
+        path: PathBuf,
+        wallet: &EvmWallet,
+    ) -> Result<PrivateDataAccess, UploadError> {
+        let data = tokio::fs::read(path).await?;
+        let data = Bytes::from(data);
+        let addr = self.private_data_put(data, wallet).await?;
+        Ok(addr)
+    }
+}
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index 4771d19e2a..d530f210f2 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -11,11 +11,17 @@ pub mod address;
 
 #[cfg(feature = "data")]
 pub mod archive;
 #[cfg(feature = "data")]
+pub mod archive_private;
+#[cfg(feature = "data")]
 pub mod data;
+#[cfg(feature = "data")]
+pub mod data_private;
 #[cfg(feature = "external-signer")]
 pub mod external_signer;
 #[cfg(feature = "fs")]
 pub mod fs;
+#[cfg(feature = "fs")]
+pub mod fs_private;
 #[cfg(feature = "registers")]
 pub mod registers;
 #[cfg(feature = "vault")]
diff --git a/autonomi/src/client/vault/user_data.rs b/autonomi/src/client/vault/user_data.rs
index 736bd6292d..1f91b547bb 100644
--- a/autonomi/src/client/vault/user_data.rs
+++ b/autonomi/src/client/vault/user_data.rs
@@ -9,6 +9,7 @@
 use std::collections::HashMap;
 
 use crate::client::archive::ArchiveAddr;
+use crate::client::archive_private::PrivateArchiveAccess;
 use crate::client::data::GetError;
 use crate::client::data::PutError;
 use crate::client::registers::RegisterAddress;
@@ -37,6 +38,8 @@ pub struct UserData {
     pub registers: HashMap<RegisterAddress, String>,
     /// Owned file archive addresses, along with their names (can be empty)
     pub file_archives: HashMap<ArchiveAddr, String>,
+    /// Owned private file archives, along with their names (can be empty)
+    pub private_file_archives: HashMap<PrivateArchiveAccess, String>,
 }
 
 /// Errors that can occur during the get operation.
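To make the new API concrete, here is a minimal sketch of the intended flow. It is illustrative only and not part of the patch: it assumes an already-connected `Client` and a funded `EvmWallet`, uses `eyre::Result` purely to collapse the distinct error types, and the function name `private_roundtrip` is invented for the example.

    use autonomi::client::archive_private::PrivateArchive;
    use autonomi::Client;
    use bytes::Bytes;
    use sn_evm::EvmWallet;

    async fn private_roundtrip(client: &Client, wallet: &EvmWallet) -> eyre::Result<()> {
        // Self-encrypt and upload. Only the returned PrivateDataAccess (which wraps
        // the data map chunk and is never stored on the network) can locate it again.
        let access = client
            .private_data_put(Bytes::from_static(b"hello"), wallet)
            .await?;
        let data = client.private_data_get(access.clone()).await?;
        assert_eq!(data, Bytes::from_static(b"hello"));

        // Group private uploads in a local archive, then store the archive itself
        // as another private blob.
        let mut archive = PrivateArchive::new();
        archive.add_new_file("hello.txt".into(), access);
        let archive_access = client.private_archive_put(archive, wallet).await?;
        let fetched = client.private_archive_get(archive_access).await?;
        assert_eq!(fetched.files().len(), 1);
        Ok(())
    }

Unlike public data, a `PrivateDataAccess` is the only handle to the uploaded bytes; losing it (or the private archive recording it) leaves the data effectively unreachable, which is presumably why `UserData` gains the `private_file_archives` field above.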
diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index 013d572037..10903c9fd2 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -52,7 +52,7 @@ where pub async fn deploy(provider: P) -> Self { let contract = NetworkTokenContract::deploy(provider) .await - .expect("Could not deploy contract"); + .expect("Could not deploy contract, update anvil by running `foundryup` and try again"); NetworkToken { contract } } From d8cad7c4daf93ebc85dc4276e5f1352e356e9267 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 25 Oct 2024 12:46:04 +0900 Subject: [PATCH 076/128] ci: fix upload download from different clients --- .github/workflows/memcheck.yml | 2 +- autonomi-cli/src/actions/download.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index c2e5406207..cbfb52d4cc 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -70,7 +70,7 @@ jobs: shell: bash - name: File upload - run: ./target/release/autonomi --log-output-dest=data-dir file upload "./the-test-data.zip" > ./upload_output 2>&1 + run: ./target/release/autonomi --log-output-dest=data-dir file upload --public "./the-test-data.zip" > ./upload_output 2>&1 env: SN_LOG: "v" timeout-minutes: 5 diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs index 7beb3578f1..ff737ac2c1 100644 --- a/autonomi-cli/src/actions/download.rs +++ b/autonomi-cli/src/actions/download.rs @@ -26,7 +26,7 @@ pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Resul match (public_address, private_address) { (Some(public_address), _) => download_public(addr, public_address, dest_path, client).await, (_, Some(private_address)) => download_private(addr, private_address, dest_path, client).await, - _ => Err(eyre!("Failed to parse data address")) + _ => Err(eyre!("Failed to parse data address {addr}")) .with_suggestion(|| "Public addresses look like this: 0037cfa13eae4393841cbc00c3a33cade0f98b8c1f20826e5c51f8269e7b09d7") .with_suggestion(|| "Private addresses look like this: 1358645341480028172") .with_suggestion(|| "Try the `file list` command to get addresses you have access to"), From b17da951a08f6e5fc04c0d31bdbef97a7a454b44 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 25 Oct 2024 12:22:25 +0900 Subject: [PATCH 077/128] feat(kad): reduce bootstrap interval --- sn_networking/src/driver.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index af80223a84..f88157766e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -131,6 +131,9 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10); +/// Periodic bootstrap interval +const KAD_PERIODIC_BOOTSTRAP_INTERVAL_S: Duration = Duration::from_secs(180 * 60); + // Init during compilation, instead of runtime error that should never happen // Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { @@ -363,6 +366,7 @@ impl NetworkBuilder { // Records never expire .set_record_ttl(None) .set_replication_factor(REPLICATION_FACTOR) + .set_periodic_bootstrap_interval(Some(KAD_PERIODIC_BOOTSTRAP_INTERVAL_S)) // Emit PUT events for validation 
prior to insertion into the RecordStore.
 // This is no longer needed as the record_storage::put now can carry out validation.
 // .set_record_filtering(KademliaStoreInserts::FilterBoth)

From 43d7936f48081760f8e5fb9d9467b92e2935c576 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Thu, 17 Oct 2024 02:26:09 +0530
Subject: [PATCH 078/128] feat(autonomi): allow the uploader to work with the
 new apis
---
 Cargo.lock | 3 +
 autonomi/Cargo.toml | 3 +
 autonomi/src/client/external_signer.rs | 15 +-
 autonomi/src/client/mod.rs | 2 +-
 autonomi/src/client/registers.rs | 138 +--
 autonomi/src/client/utils.rs | 13 +-
 autonomi/src/lib.rs | 2 +
 autonomi/src/uploader/mod.rs | 502 +++++++++++
 autonomi/src/uploader/tests.rs | 1 +
 autonomi/src/uploader/upload.rs | 1125 ++++++++++++++++++++++++
 autonomi/src/utils.rs | 7 +-
 sn_evm/src/lib.rs | 1 +
 12 files changed, 1739 insertions(+), 73 deletions(-)
 create mode 100644 autonomi/src/uploader/mod.rs
 create mode 100644 autonomi/src/uploader/tests.rs
 create mode 100644 autonomi/src/uploader/upload.rs

diff --git a/Cargo.lock b/Cargo.lock
index d274255dbc..b0b5441302 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1086,11 +1086,14 @@ dependencies = [
 "bytes",
 "console_error_panic_hook",
 "const-hex",
+ "custom_debug",
+ "dashmap",
 "evmlib",
 "eyre",
 "futures",
 "hex 0.4.3",
 "instant",
+ "itertools 0.12.1",
 "js-sys",
 "libp2p 0.54.1",
 "rand 0.8.5",
diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml
index 3da273183e..37de39675a 100644
--- a/autonomi/Cargo.toml
+++ b/autonomi/Cargo.toml
@@ -30,9 +30,12 @@ bytes = { version = "1.0.1", features = ["serde"] }
 curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [
 "num-bigint",
 ] }
+custom_debug = "~0.6.1"
+dashmap = "~6.1.0"
 eip2333 = { version = "0.2.1", package = "sn_bls_ckd" }
 const-hex = "1.12.0"
 hex = "~0.4.3"
+itertools = "~0.12.1"
 libp2p = "0.54.1"
 rand = "0.8.5"
 rmp-serde = "1.1.1"
diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs
index b17002bd9c..69abdd6c20 100644
--- a/autonomi/src/client/external_signer.rs
+++ b/autonomi/src/client/external_signer.rs
@@ -3,7 +3,8 @@ use crate::client::utils::extract_quote_payments;
 use crate::self_encryption::encrypt;
 use crate::Client;
 use bytes::Bytes;
-use sn_evm::{PaymentQuote, ProofOfPayment, QuotePayment};
+use sn_evm::{ProofOfPayment, QuotePayment};
+use sn_networking::PayeeQuote;
 use sn_protocol::storage::Chunk;
 use std::collections::HashMap;
 use xor_name::XorName;
@@ -33,7 +34,7 @@ impl Client {
         data: Bytes,
     ) -> Result<
         (
-            HashMap<XorName, PaymentQuote>,
+            HashMap<XorName, PayeeQuote>,
             Vec<QuotePayment>,
             Vec<XorName>,
         ),
@@ -41,15 +42,9 @@ impl Client {
     > {
         // Encrypt the data as chunks
         let (_data_map_chunk, _chunks, xor_names) = encrypt_data(data)?;
-
-        let cost_map: HashMap<XorName, PaymentQuote> = self
-            .get_store_quotes(xor_names.into_iter())
-            .await?
- .into_iter() - .map(|(name, (_, _, q))| (name, q)) - .collect(); - + let cost_map = self.get_store_quotes(xor_names.into_iter()).await?; let (quote_payments, free_chunks) = extract_quote_payments(&cost_map); + Ok((cost_map, quote_payments, free_chunks)) } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index d530f210f2..0585efa037 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -31,7 +31,7 @@ pub mod vault; pub mod wasm; // private module with utility functions -mod utils; +pub(crate) mod utils; pub use sn_evm::Amount; diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 52f8944e1e..f7e14c0e09 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -11,6 +11,7 @@ pub use bls::SecretKey as RegisterSecretKey; use sn_evm::Amount; use sn_evm::AttoTokens; use sn_evm::EvmWalletError; +use sn_evm::ProofOfPayment; use sn_networking::VerificationKind; use sn_protocol::storage::RetryStrategy; pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; @@ -119,6 +120,36 @@ impl Register { Ok(()) } + + /// Merge two registers together. + pub(crate) fn merge(&mut self, other: &Self) -> Result<(), RegisterError> { + debug!("Merging Register of: {:?}", self.address()); + + other.signed_reg.verify().map_err(|_| { + error!( + "Failed to verify register at address: {:?}", + other.address() + ); + RegisterError::FailedVerification + })?; + + self.signed_reg.merge(&other.signed_reg).map_err(|err| { + error!("Failed to merge registers {}: {err}", self.address()); + RegisterError::Write(err) + })?; + + for op in other.signed_reg.ops() { + if let Err(err) = self.crdt_reg.apply_op(op.clone()) { + error!( + "Failed to apply {op:?} to Register {}: {err}", + self.address() + ); + return Err(RegisterError::Write(err)); + } + } + + Ok(()) + } } impl Client { @@ -160,13 +191,18 @@ impl Client { }; // Make sure the fetched record contains valid CRDT operations - signed_reg - .verify() - .map_err(|_| RegisterError::FailedVerification)?; + signed_reg.verify().map_err(|_| { + error!("Failed to verify register at address: {address}"); + RegisterError::FailedVerification + })?; let mut crdt_reg = RegisterCrdt::new(*signed_reg.address()); for op in signed_reg.ops() { if let Err(err) = crdt_reg.apply_op(op.clone()) { + error!( + "Failed to apply {op:?} to Register {address}: {err}", + address = signed_reg.address() + ); return Err(RegisterError::Write(err)); } } @@ -186,18 +222,6 @@ impl Client { ) -> Result<(), RegisterError> { register.write_atop(&new_value, &owner)?; - let signed_register = register.signed_reg.clone(); - - // Prepare the record for network storage - let record = Record { - key: NetworkAddress::from_register_address(*register.address()).to_record_key(), - value: try_serialize_record(&signed_register, RecordKind::Register) - .map_err(|_| RegisterError::Serialization)? 
- .to_vec(), - publisher: None, - expires: None, - }; - let get_cfg = GetRecordCfg { get_quorum: Quorum::Majority, retry_strategy: Some(RetryStrategy::default()), @@ -212,16 +236,7 @@ impl Client { verification: Some((VerificationKind::Network, get_cfg)), }; - // Store the updated register on the network - self.network - .put_record(record, &put_cfg) - .await - .inspect_err(|err| { - error!( - "Failed to put record - register {:?} to the network: {err}", - register.address() - ) - })?; + self.register_upload(®ister, None, &put_cfg).await?; Ok(()) } @@ -303,31 +318,16 @@ impl Client { .inspect_err(|err| { error!("Failed to pay for register at address: {address} : {err}") })?; - let proof = if let Some(proof) = payment_proofs.get(®_xor) { - proof - } else { + let proof = payment_proofs.get(®_xor).ok_or_else(|| { // register was skipped, meaning it was already paid for error!("Register at address: {address} was already paid for"); - return Err(RegisterError::Network(NetworkError::RegisterAlreadyExists)); - }; + RegisterError::Network(NetworkError::RegisterAlreadyExists) + })?; let payee = proof .to_peer_id_payee() .ok_or(RegisterError::InvalidQuote) .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?; - let signed_register = register.signed_reg.clone(); - - let record = Record { - key: NetworkAddress::from_register_address(*address).to_record_key(), - value: try_serialize_record( - &(proof, &signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; let get_cfg = GetRecordCfg { get_quorum: Quorum::Majority, @@ -343,13 +343,8 @@ impl Client { verification: Some((VerificationKind::Network, get_cfg)), }; - debug!("Storing register at address {address} to the network"); - self.network - .put_record(record, &put_cfg) - .await - .inspect_err(|err| { - error!("Failed to put record - register {address} to the network: {err}") - })?; + self.register_upload(®ister, Some(proof), &put_cfg) + .await?; if let Some(channel) = self.client_event_sender.as_ref() { let summary = UploadSummary { @@ -363,4 +358,47 @@ impl Client { Ok(register) } + + pub(crate) async fn register_upload( + &self, + register: &Register, + payment: Option<&ProofOfPayment>, + put_cfg: &PutRecordCfg, + ) -> Result<(), RegisterError> { + let signed_register = ®ister.signed_reg; + let record = if let Some(proof) = payment { + Record { + key: NetworkAddress::from_register_address(*register.address()).to_record_key(), + value: try_serialize_record( + &(proof, signed_register), + RecordKind::RegisterWithPayment, + ) + .map_err(|_| RegisterError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: NetworkAddress::from_register_address(*register.address()).to_record_key(), + value: try_serialize_record(signed_register, RecordKind::Register) + .map_err(|_| RegisterError::Serialization)? 
+                .to_vec(),
+                publisher: None,
+                expires: None,
+            }
+        };
+
+        self.network
+            .put_record(record, put_cfg)
+            .await
+            .inspect_err(|err| {
+                error!(
+                    "Failed to put record - register {:?} to the network: {err}",
+                    register.address()
+                )
+            })?;
+
+        Ok(())
+    }
+}
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index 68ae70f2f7..4e0c0b27c8 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -12,7 +12,7 @@ use bytes::Bytes;
 use libp2p::kad::{Quorum, Record};
 use rand::{thread_rng, Rng};
 use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
-use sn_evm::{EvmWallet, PaymentQuote, ProofOfPayment, QuotePayment};
+use sn_evm::{EvmWallet, ProofOfPayment, QuotePayment};
 use sn_networking::{
 GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind,
 };
@@ -149,12 +149,7 @@ impl Client {
         content_addrs: impl Iterator<Item = XorName>,
         wallet: &EvmWallet,
     ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
-        let cost_map = self
-            .get_store_quotes(content_addrs)
-            .await?
-            .into_iter()
-            .map(|(name, (_, _, q))| (name, q))
-            .collect();
+        let cost_map = self.get_store_quotes(content_addrs).await?;
 
         let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
@@ -233,12 +228,12 @@ async fn fetch_store_quote(
 
 /// Form to be executed payments and already executed payments from a cost map.
 pub(crate) fn extract_quote_payments(
-    cost_map: &HashMap<XorName, PaymentQuote>,
+    cost_map: &HashMap<XorName, PayeeQuote>,
 ) -> (Vec<QuotePayment>, Vec<XorName>) {
     let mut to_be_paid = vec![];
     let mut already_paid = vec![];
 
-    for (chunk_address, quote) in cost_map.iter() {
+    for (chunk_address, (_, _, quote)) in cost_map.iter() {
         if quote.cost.is_zero() {
             already_paid.push(*chunk_address);
         } else {
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index c73bef1378..6a1476d900 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -33,6 +33,8 @@ extern crate tracing;
 
 pub mod client;
 
+pub mod uploader;
+
 #[cfg(feature = "data")]
 mod self_encryption;
 mod utils;
diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
new file mode 100644
index 0000000000..2677a10b17
--- /dev/null
+++ b/autonomi/src/uploader/mod.rs
@@ -0,0 +1,502 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#[cfg(test)]
+mod tests;
+mod upload;
+
+use crate::client::data::PutError;
+use crate::client::registers::{Register, RegisterError};
+use crate::Client;
+use itertools::Either;
+use sn_evm::EvmWallet;
+use sn_evm::{Amount, EvmNetworkTokenError, ProofOfPayment};
+use sn_networking::{NetworkError, PayeeQuote};
+use sn_protocol::{
+    storage::{Chunk, ChunkAddress, RetryStrategy},
+    NetworkAddress,
+};
+use sn_registers::RegisterAddress;
+use std::{
+    collections::{HashMap, HashSet},
+    fmt::Debug,
+    path::PathBuf,
+};
+use tokio::sync::mpsc;
+use upload::InnerUploader;
+use xor_name::XorName;
+
+/// The default batch size that determines the number of data that are processed in parallel.
+/// This includes fetching the store cost, uploading and verifying the data.
+/// Use PAYMENT_BATCH_SIZE to control the number of payments made in a single transaction.
+pub const BATCH_SIZE: usize = 16;
+
+/// The number of payments to make in a single EVM transaction.
+pub const PAYMENT_BATCH_SIZE: usize = 512;
+
+/// The number of repayments to attempt for a failed item before returning an error.
+/// If value = 1, we do an initial payment & 1 repayment. Thus we make a max 2 payments per data item.
+#[cfg(not(test))]
+pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 3;
+#[cfg(test)]
+pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1;
+
+#[derive(Debug, thiserror::Error)]
+pub enum UploadError {
+    #[error("Internal Error")]
+    InternalError,
+    #[error("Invalid cfg: {0:?}")]
+    InvalidCfg(String),
+    #[error("I/O error: {0:?}")]
+    Io(#[from] std::io::Error),
+    #[error("The upload failed with maximum repayments reached for multiple items: {items:?} Summary: {summary:?}")]
+    MaximumRepaymentsReached {
+        items: Vec<XorName>,
+        summary: UploadSummary,
+    },
+    #[error("Network error: {0:?}")]
+    Network(#[from] NetworkError),
+    #[error("Put error: {0:?}")]
+    PutError(#[from] PutError),
+    #[error("Register error: {0:?}")]
+    RegisterError(#[from] RegisterError),
+    #[error("Multiple consecutive network errors reported during upload")]
+    SequentialNetworkErrors,
+    #[error("Too many sequential payment errors reported during upload")]
+    SequentialUploadPaymentError,
+    #[error("Network Token error: {0:?}")]
+    EvmNetworkTokenError(#[from] EvmNetworkTokenError),
+}
+
+/// The set of options to pass into the `Uploader`
+#[derive(Debug, Clone, Copy)]
+pub struct UploadCfg {
+    pub batch_size: usize,
+    pub payment_batch_size: usize,
+    pub verify_store: bool,
+    pub show_holders: bool,
+    pub retry_strategy: RetryStrategy,
+    pub max_repayments_for_failed_data: usize,
+    pub collect_registers: bool,
+}
+
+impl Default for UploadCfg {
+    fn default() -> Self {
+        Self {
+            batch_size: BATCH_SIZE,
+            payment_batch_size: PAYMENT_BATCH_SIZE,
+            verify_store: true,
+            show_holders: false,
+            retry_strategy: RetryStrategy::Balanced,
+            max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM,
+            collect_registers: false,
+        }
+    }
+}
+
+/// The result of a successful upload.
+#[derive(Debug, Clone, Default)]
+pub struct UploadSummary {
+    pub storage_cost: Amount,
+    pub final_balance: Amount,
+    pub uploaded_addresses: HashSet<NetworkAddress>,
+    pub uploaded_registers: HashMap<RegisterAddress, Register>,
+    pub uploaded_count: usize,
+    pub skipped_count: usize,
+}
+
+impl UploadSummary {
+    /// Merge two UploadSummary together.
+    pub fn merge(mut self, other: Self) -> Result<Self, Box<UploadError>> {
+        self.uploaded_addresses.extend(other.uploaded_addresses);
+        self.uploaded_registers.extend(other.uploaded_registers);
+
+        let summary = Self {
+            storage_cost: self
+                .storage_cost
+                .checked_add(other.storage_cost)
+                .ok_or_else(|| {
+                    error!("Failed to merge UploadSummary: NumericOverflow");
+                    UploadError::InternalError
+                })?,
+            final_balance: self
+                .final_balance
+                .checked_add(other.storage_cost)
+                .ok_or_else(|| {
+                    error!("Failed to merge UploadSummary: NumericOverflow");
+                    UploadError::InternalError
+                })?,
+            uploaded_addresses: self.uploaded_addresses,
+            uploaded_registers: self.uploaded_registers,
+            uploaded_count: self.uploaded_count + other.uploaded_count,
+            skipped_count: self.skipped_count + other.skipped_count,
+        };
+        Ok(summary)
+    }
+}
+
+#[derive(Debug, Clone)]
+/// The events emitted from the upload process.
+pub enum UploadEvent {
+    /// Uploaded a record to the network.
+    ChunkUploaded(ChunkAddress),
+    /// Uploaded a Register to the network.
+    /// The returned register is just the passed in register.
+    RegisterUploaded(Register),
+    /// The Chunk already exists in the network. No payments were made.
+    ChunkAlreadyExistsInNetwork(ChunkAddress),
+    /// The Register already exists in the network. The local register changes were pushed to the network.
+    /// No payments were made.
+    /// The returned register contains the remote replica merged with the passed in register.
+    RegisterUpdated(Register),
+    /// Payment for a batch of records has been made.
+    PaymentMade { tokens_spent: Amount },
+    /// The upload process has terminated with an error.
+    // Note: We cannot send the Error enum as it does not implement Clone. So we cannot even do
+    // Result<UploadSummary, UploadError> if we also want to return this error from the function.
+    Error,
+}
+
+pub struct Uploader {
+    // Has to be stored as an Option as we have to take ownership of inner during the upload.
+    inner: Option<InnerUploader>,
+}
+
+impl Uploader {
+    /// Start the upload process.
+    pub async fn start_upload(mut self) -> Result<UploadSummary, UploadError> {
+        let event_sender = self
+            .inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .event_sender
+            .clone();
+        match upload::start_upload(Box::new(self)).await {
+            Err(err) => {
+                if let Some(event_sender) = event_sender {
+                    if let Err(err) = event_sender.send(UploadEvent::Error).await {
+                        error!("Error while emitting event: {err:?}");
+                    }
+                }
+                Err(err)
+            }
+            Ok(summary) => Ok(summary),
+        }
+    }
+
+    /// Creates a new instance of `Uploader` with the default configuration.
+    /// To modify the configuration, use the provided setter methods (`set_...` functions).
+    // NOTE: Self has to be constructed only using this method. We expect `Self::inner` is present everywhere.
+    pub fn new(client: Client, wallet: EvmWallet) -> Self {
+        Self {
+            inner: Some(InnerUploader::new(client, wallet)),
+        }
+    }
+
+    /// Update all the configurations by passing the `UploadCfg` struct
+    pub fn set_upload_cfg(&mut self, cfg: UploadCfg) {
+        // Self can only be constructed with new(), which will set inner to InnerUploader always.
+        // So it is okay to call unwrap here.
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_cfg(cfg);
+    }
+
+    /// Sets the default batch size that determines the number of data that are processed in parallel.
+    ///
+    /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`.
+    pub fn set_batch_size(&mut self, batch_size: usize) {
+        // Self can only be constructed with new(), which will set inner to InnerUploader always.
+        // So it is okay to call unwrap here.
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_batch_size(batch_size);
+    }
+
+    /// Sets the option to verify the data after they have been uploaded.
+    ///
+    /// By default, this option is set to true.
+    pub fn set_verify_store(&mut self, verify_store: bool) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_verify_store(verify_store);
+    }
+
+    /// Sets the option to display the holders that are expected to be holding the data during verification.
+    ///
+    /// By default, this option is set to false.
+    pub fn set_show_holders(&mut self, show_holders: bool) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_show_holders(show_holders);
+    }
+
+    /// Sets the RetryStrategy to increase the re-try during the GetStoreCost & Upload tasks.
+    /// This does not affect the retries during the Payment task.
Use `set_max_repayments_for_failed_data` to
+    /// configure the re-payment attempts.
+    ///
+    /// By default, this option is set to RetryStrategy::Quick
+    pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_retry_strategy(retry_strategy);
+    }
+
+    /// Sets the maximum number of repayments to perform if the initial payment failed.
+    /// NOTE: This creates an extra Spend and uses the wallet funds.
+    ///
+    /// By default, this option is set to 1 retry.
+    pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_max_repayments_for_failed_data(retries);
+    }
+
+    /// Enables the uploader to return all the registers that were Uploaded or Updated.
+    /// The registers are emitted through the event channel whenever they're completed, but this returns them
+    /// through the UploadSummary when the whole upload process completes.
+    ///
+    /// By default, this option is set to False
+    pub fn set_collect_registers(&mut self, collect_registers: bool) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .set_collect_registers(collect_registers);
+    }
+
+    /// Returns a receiver for UploadEvent.
+    /// This method is optional and the upload process can be performed without it.
+    pub fn get_event_receiver(&mut self) -> mpsc::Receiver<UploadEvent> {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .get_event_receiver()
+    }
+
+    /// Insert a list of chunk paths to upload.
+    pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator<Item = (XorName, PathBuf)>) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .insert_chunk_paths(chunks);
+    }
+
+    /// Insert a list of chunks to upload.
+    pub fn insert_chunks(&mut self, chunks: impl IntoIterator<Item = Chunk>) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .insert_chunks(chunks);
+    }
+
+    /// Insert a list of registers to upload.
+    pub fn insert_register(&mut self, registers: impl IntoIterator<Item = Register>) {
+        self.inner
+            .as_mut()
+            .expect("Uploader::new makes sure inner is present")
+            .insert_register(registers);
+    }
+}
+
+// ======= Private ========
+
+/// An interface to make the testing easier by not interacting with the network.
+trait UploaderInterface: Send + Sync {
+    fn take_inner_uploader(&mut self) -> InnerUploader;
+
+    // Mutable reference is used in tests.
+    fn submit_get_register_task(
+        &mut self,
+        client: Client,
+        reg_addr: RegisterAddress,
+        task_result_sender: mpsc::Sender<TaskResult>,
+    );
+
+    fn submit_push_register_task(
+        &mut self,
+        client: Client,
+        upload_item: UploadItem,
+        verify_store: bool,
+        task_result_sender: mpsc::Sender<TaskResult>,
+    );
+
+    #[expect(clippy::too_many_arguments)]
+    fn submit_get_store_cost_task(
+        &mut self,
+        client: Client,
+        xorname: XorName,
+        address: NetworkAddress,
+        previous_payments: Option<&Vec<ProofOfPayment>>,
+        get_store_cost_strategy: GetStoreCostStrategy,
+        max_repayments_for_failed_data: usize,
+        task_result_sender: mpsc::Sender<TaskResult>,
+    );
+
+    fn submit_make_payment_task(
+        &mut self,
+        to_send: Option<(UploadItem, Box<PayeeQuote>)>,
+        make_payment_sender: mpsc::Sender<Option<(UploadItem, Box<PayeeQuote>)>>,
+    );
+
+    fn submit_upload_item_task(
+        &mut self,
+        upload_item: UploadItem,
+        client: Client,
+        previous_payments: Option<&Vec<ProofOfPayment>>,
+        verify_store: bool,
+        retry_strategy: RetryStrategy,
+        task_result_sender: mpsc::Sender<TaskResult>,
+    );
+}
+
+// Configuration functions are used in tests.
So these are defined here and re-used inside `Uploader`
+impl InnerUploader {
+    pub(super) fn set_cfg(&mut self, cfg: UploadCfg) {
+        self.cfg = cfg;
+    }
+
+    pub(super) fn set_batch_size(&mut self, batch_size: usize) {
+        self.cfg.batch_size = batch_size;
+    }
+
+    pub(super) fn set_verify_store(&mut self, verify_store: bool) {
+        self.cfg.verify_store = verify_store;
+    }
+
+    pub(super) fn set_show_holders(&mut self, show_holders: bool) {
+        self.cfg.show_holders = show_holders;
+    }
+
+    pub(super) fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) {
+        self.cfg.retry_strategy = retry_strategy;
+    }
+
+    pub(super) fn set_max_repayments_for_failed_data(&mut self, retries: usize) {
+        self.cfg.max_repayments_for_failed_data = retries;
+    }
+
+    pub(super) fn set_collect_registers(&mut self, collect_registers: bool) {
+        self.cfg.collect_registers = collect_registers;
+    }
+
+    pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver<UploadEvent> {
+        let (tx, rx) = mpsc::channel(100);
+        self.event_sender = Some(tx);
+        rx
+    }
+
+    pub(super) fn insert_chunk_paths(
+        &mut self,
+        chunks: impl IntoIterator<Item = (XorName, PathBuf)>,
+    ) {
+        self.all_upload_items
+            .extend(chunks.into_iter().map(|(xorname, path)| {
+                let item = UploadItem::Chunk {
+                    address: ChunkAddress::new(xorname),
+                    chunk: Either::Right(path),
+                };
+                (xorname, item)
+            }));
+    }
+
+    pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator<Item = Chunk>) {
+        self.all_upload_items
+            .extend(chunks.into_iter().map(|chunk| {
+                let xorname = *chunk.name();
+                let item = UploadItem::Chunk {
+                    address: *chunk.address(),
+                    chunk: Either::Left(chunk),
+                };
+                (xorname, item)
+            }));
+    }
+
+    pub(super) fn insert_register(&mut self, registers: impl IntoIterator<Item = Register>) {
+        self.all_upload_items
+            .extend(registers.into_iter().map(|reg| {
+                let address = *reg.address();
+                let item = UploadItem::Register { address, reg };
+                (address.xorname(), item)
+            }));
+    }
+}
+
+#[derive(Debug, Clone)]
+enum UploadItem {
+    Chunk {
+        address: ChunkAddress,
+        // Either the actual chunk or the path to the chunk.
+        chunk: Either<Chunk, PathBuf>,
+    },
+    Register {
+        address: RegisterAddress,
+        reg: Register,
+    },
+}
+
+impl UploadItem {
+    fn address(&self) -> NetworkAddress {
+        match self {
+            Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address),
+            Self::Register { address, .. } => NetworkAddress::from_register_address(*address),
+        }
+    }
+
+    fn xorname(&self) -> XorName {
+        match self {
+            UploadItem::Chunk { address, .. } => *address.xorname(),
+            UploadItem::Register { address, .. } => address.xorname(),
+        }
+    }
+}
+
+#[derive(Debug)]
+enum TaskResult {
+    GetRegisterFromNetworkOk {
+        remote_register: Register,
+    },
+    GetRegisterFromNetworkErr(XorName),
+    PushRegisterOk {
+        updated_register: Register,
+    },
+    PushRegisterErr(XorName),
+    GetStoreCostOk {
+        xorname: XorName,
+        quote: Box<PayeeQuote>,
+    },
+    GetStoreCostErr {
+        xorname: XorName,
+        get_store_cost_strategy: GetStoreCostStrategy,
+        max_repayments_reached: bool,
+    },
+    MakePaymentsOk {
+        payment_proofs: HashMap<XorName, ProofOfPayment>,
+    },
+    MakePaymentsErr {
+        failed_xornames: Vec<(XorName, Box<PayeeQuote>)>,
+    },
+    UploadOk(XorName),
+    UploadErr {
+        xorname: XorName,
+    },
+}
+
+#[derive(Debug, Clone)]
+enum GetStoreCostStrategy {
+    /// Selects the PeerId with the lowest quote
+    Cheapest,
+    /// Selects the cheapest PeerId that we have not made payment to.
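+    /// Used during repayment: after repeated upload failures, the uploader pays a node it has not paid before and retries.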
+    SelectDifferentPayee,
+}
diff --git a/autonomi/src/uploader/tests.rs b/autonomi/src/uploader/tests.rs
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/autonomi/src/uploader/tests.rs
@@ -0,0 +1 @@
+
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
new file mode 100644
index 0000000000..665396430e
--- /dev/null
+++ b/autonomi/src/uploader/upload.rs
@@ -0,0 +1,1125 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+// TODO: fix
+#![allow(clippy::result_large_err)]
+
+use super::{
+    GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary, Uploader,
+    UploaderInterface, PAYMENT_BATCH_SIZE,
+};
+use crate::{
+    client::registers::Register, uploader::UploadError,
+    utils::payment_proof_from_quotes_and_payments, Client,
+};
+use bytes::Bytes;
+use itertools::Either;
+use libp2p::{kad::Quorum, PeerId};
+use sn_evm::{Amount, EvmWallet, ProofOfPayment};
+use sn_networking::{GetRecordCfg, PayeeQuote, PutRecordCfg, VerificationKind};
+use sn_protocol::{
+    storage::{Chunk, RetryStrategy},
+    NetworkAddress,
+};
+use sn_registers::RegisterAddress;
+use std::collections::{HashMap, HashSet};
+use tokio::sync::mpsc;
+use xor_name::XorName;
+
+/// The maximum number of sequential payment failures before aborting the upload process.
+#[cfg(not(test))]
+const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 3;
+#[cfg(test)]
+const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 1;
+
+/// The maximum number of sequential network failures before aborting the upload process.
+// todo: use uploader.retry_strategy.get_count() instead.
+#[cfg(not(test))]
+const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 32;
+#[cfg(test)]
+const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 1;
+
+/// The number of upload failures for a single data item before we select a different payee and retry.
+#[cfg(not(test))]
+const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 3;
+#[cfg(test)]
+const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1;
+
+type Result<T> = std::result::Result<T, UploadError>;
+
+// TODO:
+// 1. since wallet balance is not fetched after finishing a task, get it before we send OK/Err
+// 2. Rework client event, it should be sent via the lowest level of the PUT. while for chunks it is done earlier (data.rs)
+// 3. track each batch with an id
+//    1. log whenever we insert/remove items. i.e., don't ignore values with `let _`
+
+/// The main loop that performs the upload process.
+/// An interface is passed here for easy testing.
+pub(super) async fn start_upload(
+    mut interface: Box<dyn UploaderInterface>,
+) -> Result<UploadSummary> {
+    let mut uploader = interface.take_inner_uploader();
+
+    uploader.validate_upload_cfg()?;
+
+    // Take out the testing task senders if any. This is only set for tests.
+    let (task_result_sender, mut task_result_receiver) =
+        if let Some(channels) = uploader.testing_task_channels.take() {
+            channels
+        } else {
+            // 6 because of the 6 pipelines, 1 for redundancy.
+ mpsc::channel(uploader.cfg.batch_size * 6 + 1) + }; + let (make_payment_sender, make_payment_receiver) = mpsc::channel(uploader.cfg.batch_size); + + uploader.start_payment_processing_thread( + make_payment_receiver, + task_result_sender.clone(), + uploader.cfg.payment_batch_size, + )?; + + // chunks can be pushed to pending_get_store_cost directly + uploader.pending_to_get_store_cost = uploader + .all_upload_items + .iter() + .filter_map(|(xorname, item)| { + if let UploadItem::Chunk { .. } = item { + Some((*xorname, GetStoreCostStrategy::Cheapest)) + } else { + None + } + }) + .collect(); + + // registers have to be verified + merged with remote replica, so we have to fetch it first. + uploader.pending_to_get_register = uploader + .all_upload_items + .iter() + .filter_map(|(_xorname, item)| { + if let UploadItem::Register { address, .. } = item { + Some(*address) + } else { + None + } + }) + .collect(); + + loop { + // Break if we have uploaded all the items. + // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times. + if uploader.all_upload_items.is_empty() { + debug!("Upload items are empty, exiting main upload loop."); + // To avoid empty final_balance when all items are skipped. + uploader.upload_final_balance = + uploader + .wallet + .balance_of_tokens() + .await + .inspect_err(|err| { + error!("Failed to get wallet balance: {err:?}"); + })?; + + #[cfg(test)] + trace!("UPLOADER STATE: finished uploading all items {uploader:?}"); + let summary = UploadSummary { + storage_cost: uploader.tokens_spent, + final_balance: uploader.upload_final_balance, + uploaded_addresses: uploader.uploaded_addresses, + uploaded_count: uploader.uploaded_count, + skipped_count: uploader.skipped_count, + uploaded_registers: uploader.uploaded_registers, + }; + + if !uploader.max_repayments_reached.is_empty() { + error!( + "The maximum repayments were reached for these addresses: {:?}", + uploader.max_repayments_reached + ); + return Err(UploadError::MaximumRepaymentsReached { + items: uploader.max_repayments_reached.into_iter().collect(), + summary, + }); + } + + return Ok(summary); + } + + // try to GET register if we have enough buffer. + // The results of the get & push register steps are used to fill up `pending_to_get_store` cost + // Since the get store cost list is the init state, we don't have to check if it is not full. + while !uploader.pending_to_get_register.is_empty() + && uploader.on_going_get_register.len() < uploader.cfg.batch_size + { + if let Some(reg_addr) = uploader.pending_to_get_register.pop() { + trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); + let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); + interface.submit_get_register_task( + uploader.client.clone(), + reg_addr, + task_result_sender.clone(), + ); + } + } + + // try to push register if we have enough buffer. + // No other checks for the same reason as the above step. 
+ while !uploader.pending_to_push_register.is_empty() + && uploader.on_going_get_register.len() < uploader.cfg.batch_size + { + let upload_item = uploader.pop_item_for_push_register()?; + trace!( + "Conditions met for push registers {:?}", + upload_item.xorname() + ); + let _ = uploader + .on_going_push_register + .insert(upload_item.xorname()); + interface.submit_push_register_task( + uploader.client.clone(), + upload_item, + uploader.cfg.verify_store, + task_result_sender.clone(), + ); + } + + // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer. + while !uploader.pending_to_get_store_cost.is_empty() + && uploader.on_going_get_cost.len() < uploader.cfg.batch_size + && uploader.pending_to_pay.len() < uploader.cfg.payment_batch_size + { + let (xorname, address, get_store_cost_strategy) = + uploader.pop_item_for_get_store_cost()?; + trace!("Conditions met for get store cost. {xorname:?} {get_store_cost_strategy:?}",); + + let _ = uploader.on_going_get_cost.insert(xorname); + interface.submit_get_store_cost_task( + uploader.client.clone(), + xorname, + address, + uploader.payment_proofs.get(&xorname), + get_store_cost_strategy, + uploader.cfg.max_repayments_for_failed_data, + task_result_sender.clone(), + ); + } + + // try to make payment for an item if pending_to_upload needs items & if we have enough buffer. + while !uploader.pending_to_pay.is_empty() + && uploader.on_going_payments.len() < uploader.cfg.payment_batch_size + && uploader.pending_to_upload.len() < uploader.cfg.batch_size + { + let (upload_item, quote) = uploader.pop_item_for_make_payment()?; + trace!( + "Conditions met for making payments. {:?} {quote:?}", + upload_item.xorname() + ); + let _ = uploader.on_going_payments.insert(upload_item.xorname()); + + interface + .submit_make_payment_task(Some((upload_item, quote)), make_payment_sender.clone()); + } + + // try to upload if we have enough buffer to upload. + while !uploader.pending_to_upload.is_empty() + && uploader.on_going_uploads.len() < uploader.cfg.batch_size + { + #[cfg(test)] + trace!("UPLOADER STATE: upload_item : {uploader:?}"); + let upload_item = uploader.pop_item_for_upload_item()?; + let xorname = upload_item.xorname(); + + trace!("Conditions met for uploading. {xorname:?}"); + let _ = uploader.on_going_uploads.insert(xorname); + interface.submit_upload_item_task( + upload_item, + uploader.client.clone(), + uploader.payment_proofs.get(&xorname), + uploader.cfg.verify_store, + uploader.cfg.retry_strategy, + task_result_sender.clone(), + ); + } + + // Fire None to trigger a forced round of making leftover payments, if there are not enough store cost tasks + // to fill up the buffer. + if uploader.pending_to_get_store_cost.is_empty() + && uploader.on_going_get_cost.is_empty() + && !uploader.on_going_payments.is_empty() + && uploader.on_going_payments.len() < uploader.cfg.payment_batch_size + { + #[cfg(test)] + trace!("UPLOADER STATE: make_payment (forced): {uploader:?}"); + + debug!("There are not enough on going payments to trigger a batch Payment and no get_store_costs to fill the batch. 
Triggering forced round of payment"); + interface.submit_make_payment_task(None, make_payment_sender.clone()); + } + + #[cfg(test)] + trace!("UPLOADER STATE: before await task result: {uploader:?}"); + + trace!("Fetching task result"); + let task_result = task_result_receiver + .recv() + .await + .ok_or(UploadError::InternalError)?; + trace!("Received task result: {task_result:?}"); + match task_result { + TaskResult::GetRegisterFromNetworkOk { remote_register } => { + // if we got back the register, then merge & PUT it. + let xorname = remote_register.address().xorname(); + trace!("TaskResult::GetRegisterFromNetworkOk for remote register: {xorname:?} \n{remote_register:?}"); + let _ = uploader.on_going_get_register.remove(&xorname); + + let reg = uploader.all_upload_items.get_mut(&xorname).ok_or_else(|| { + error!("Register {xorname:?} not found in all_upload_items."); + UploadError::InternalError + })?; + if let UploadItem::Register { reg, .. } = reg { + reg.merge(&remote_register).inspect_err(|err| { + error!("Uploader failed to merge remote register: {err:?}"); + })?; + uploader.pending_to_push_register.push(xorname); + } + } + TaskResult::GetRegisterFromNetworkErr(xorname) => { + // then the register is a new one. It can follow the same flow as chunks now. + let _ = uploader.on_going_get_register.remove(&xorname); + + uploader + .pending_to_get_store_cost + .push((xorname, GetStoreCostStrategy::Cheapest)); + } + TaskResult::PushRegisterOk { updated_register } => { + // push modifies the register, so we return this instead of the one from all_upload_items + let xorname = updated_register.address().xorname(); + let _ = uploader.on_going_push_register.remove(&xorname); + uploader.skipped_count += 1; + let _ = uploader + .uploaded_addresses + .insert(NetworkAddress::from_register_address( + *updated_register.address(), + )); + + let _old_register = + uploader.all_upload_items.remove(&xorname).ok_or_else(|| { + error!("Register {xorname:?} not found in all_upload_items"); + UploadError::InternalError + })?; + + if uploader.cfg.collect_registers { + let _ = uploader + .uploaded_registers + .insert(*updated_register.address(), updated_register.clone()); + } + uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register)); + } + TaskResult::PushRegisterErr(xorname) => { + // the register failed to be Pushed. Retry until failure. + let _ = uploader.on_going_push_register.remove(&xorname); + uploader.pending_to_push_register.push(xorname); + + uploader.push_register_errors += 1; + if uploader.push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { + error!("Max sequential network failures reached during PushRegisterErr."); + return Err(UploadError::SequentialNetworkErrors); + } + } + TaskResult::GetStoreCostOk { xorname, quote } => { + let _ = uploader.on_going_get_cost.remove(&xorname); + uploader.get_store_cost_errors = 0; // reset error if Ok. We only throw error after 'n' sequential errors + + trace!("GetStoreCostOk for {xorname:?}'s store_cost {:?}", quote.2); + + if !quote.2.cost.is_zero() { + uploader.pending_to_pay.push((xorname, quote)); + } + // if cost is 0, then it already in the network. + else { + // remove the item since we have uploaded it. 
+ let removed_item = + uploader.all_upload_items.remove(&xorname).ok_or_else(|| { + error!("Uploadable item not found in all_upload_items: {xorname:?}"); + UploadError::InternalError + })?; + let _ = uploader.uploaded_addresses.insert(removed_item.address()); + trace!("{xorname:?} has store cost of 0 and it already exists on the network"); + uploader.skipped_count += 1; + + // if during the first try we skip the item, then it is already present in the network. + match removed_item { + UploadItem::Chunk { address, .. } => { + uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork( + address, + )); + } + + UploadItem::Register { reg, .. } => { + if uploader.cfg.collect_registers { + let _ = uploader + .uploaded_registers + .insert(*reg.address(), reg.clone()); + } + uploader.emit_upload_event(UploadEvent::RegisterUpdated(reg)); + } + } + } + } + TaskResult::GetStoreCostErr { + xorname, + get_store_cost_strategy, + max_repayments_reached, + } => { + let _ = uploader.on_going_get_cost.remove(&xorname); + trace!("GetStoreCostErr for {xorname:?} , get_store_cost_strategy: {get_store_cost_strategy:?}, max_repayments_reached: {max_repayments_reached:?}"); + + // If max repayments reached, track it separately. Else retry get_store_cost. + if max_repayments_reached { + error!("Max repayments reached for {xorname:?}. Skipping upload for it"); + uploader.max_repayments_reached.insert(xorname); + uploader.all_upload_items.remove(&xorname); + } else { + // use the same strategy. The repay different payee is set only if upload fails. + uploader + .pending_to_get_store_cost + .push((xorname, get_store_cost_strategy.clone())); + } + uploader.get_store_cost_errors += 1; + if uploader.get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { + error!("Max sequential network failures reached during GetStoreCostErr."); + return Err(UploadError::SequentialNetworkErrors); + } + } + TaskResult::MakePaymentsOk { payment_proofs } => { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .try_fold(Amount::from(0), |acc, cost| acc.checked_add(cost)) + .ok_or_else(|| { + error!("Overflow when summing up tokens spent"); + UploadError::InternalError + })?; + trace!( + "MakePaymentsOk for {} items, with {tokens_spent:?} tokens.", + payment_proofs.len(), + ); + for xorname in payment_proofs.keys() { + let _ = uploader.on_going_payments.remove(xorname); + } + uploader + .pending_to_upload + .extend(payment_proofs.keys().cloned()); + for (xorname, proof) in payment_proofs { + if let Some(payments) = uploader.payment_proofs.get_mut(&xorname) { + payments.push(proof) + } else { + uploader.payment_proofs.insert(xorname, vec![proof]); + } + } + // reset sequential payment fail error if ok. We throw error if payment fails continuously more than + // MAX_SEQUENTIAL_PAYMENT_FAILS errors. 
+ uploader.make_payments_errors = 0; + uploader.tokens_spent = uploader + .tokens_spent + .checked_add(tokens_spent) + .ok_or_else(|| { + error!("Overflow when summing up tokens spent for summary."); + UploadError::InternalError + })?; + + uploader.emit_upload_event(UploadEvent::PaymentMade { tokens_spent }); + } + TaskResult::MakePaymentsErr { failed_xornames } => { + trace!("MakePaymentsErr for {:?} items", failed_xornames.len()); + // TODO: handle insufficient balance error + + for (xorname, quote) in failed_xornames { + let _ = uploader.on_going_payments.remove(&xorname); + uploader.pending_to_pay.push((xorname, quote)); + } + uploader.make_payments_errors += 1; + + if uploader.make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS { + error!("Max sequential upload failures reached during MakePaymentsErr."); + // Too many sequential overall payment failure indicating + // unrecoverable failure of spend tx continuously rejected by network. + // The entire upload process shall be terminated. + return Err(UploadError::SequentialUploadPaymentError); + } + } + TaskResult::UploadOk(xorname) => { + let _ = uploader.on_going_uploads.remove(&xorname); + uploader.uploaded_count += 1; + trace!("UploadOk for {xorname:?}"); + // remove the previous payments + uploader.payment_proofs.remove(&xorname); + // remove the item since we have uploaded it. + let removed_item = uploader.all_upload_items.remove(&xorname).ok_or_else(|| { + error!("Uploadable item not found in all_upload_items: {xorname:?}"); + UploadError::InternalError + })?; + let _ = uploader.uploaded_addresses.insert(removed_item.address()); + + match removed_item { + UploadItem::Chunk { address, .. } => { + uploader.emit_upload_event(UploadEvent::ChunkUploaded(address)); + } + UploadItem::Register { reg, .. } => { + if uploader.cfg.collect_registers { + let _ = uploader + .uploaded_registers + .insert(*reg.address(), reg.clone()); + } + uploader.emit_upload_event(UploadEvent::RegisterUploaded(reg)); + } + } + } + TaskResult::UploadErr { xorname } => { + let _ = uploader.on_going_uploads.remove(&xorname); + trace!("UploadErr for {xorname:?}"); + + // keep track of the failure + let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0); + *n_errors += 1; + + // if quote has expired, don't retry the upload again. Instead get the cheapest quote again. + if *n_errors > UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE { + // if error > threshold, then select different payee. else retry again + // Also reset n_errors as we want to enable retries for the new payee. + *n_errors = 0; + debug!("Max error during upload reached for {xorname:?}. 
Selecting a different payee."); + + uploader + .pending_to_get_store_cost + .push((xorname, GetStoreCostStrategy::SelectDifferentPayee)); + } else { + uploader.pending_to_upload.push(xorname); + } + } + } + } +} + +impl UploaderInterface for Uploader { + fn take_inner_uploader(&mut self) -> InnerUploader { + self.inner + .take() + .expect("Uploader::new makes sure inner is present") + } + + fn submit_get_store_cost_task( + &mut self, + client: Client, + xorname: XorName, + address: NetworkAddress, + previous_payments: Option<&Vec>, + get_store_cost_strategy: GetStoreCostStrategy, + max_repayments_for_failed_data: usize, + task_result_sender: mpsc::Sender, + ) { + trace!("Spawning get_store_cost for {xorname:?}"); + let previous_payments_to = if let Some(previous_payments) = previous_payments { + let peer_ids = previous_payments + .iter() + .map(|payment_proof| { + payment_proof + .to_peer_id_payee() + .ok_or_else(|| { + error!("Invalid payment proof found, could not obtain peer_id {payment_proof:?}"); + UploadError::InternalError + }) + }) + .collect::>>(); + peer_ids + } else { + Ok(vec![]) + }; + + let _handle = tokio::spawn(async move { + let task_result = match InnerUploader::get_store_cost( + client, + xorname, + address, + get_store_cost_strategy.clone(), + previous_payments_to, + max_repayments_for_failed_data, + ) + .await + { + Ok(quote) => { + debug!("StoreCosts retrieved for {xorname:?} quote: {quote:?}"); + TaskResult::GetStoreCostOk { + xorname, + quote: Box::new(quote), + } + } + Err(err) => { + error!("Encountered error {err:?} when getting store_cost for {xorname:?}",); + + let max_repayments_reached = + matches!(&err, UploadError::MaximumRepaymentsReached { .. }); + + TaskResult::GetStoreCostErr { + xorname, + get_store_cost_strategy, + max_repayments_reached, + } + } + }; + + let _ = task_result_sender.send(task_result).await; + }); + } + + fn submit_get_register_task( + &mut self, + client: Client, + reg_addr: RegisterAddress, + task_result_sender: mpsc::Sender, + ) { + let xorname = reg_addr.xorname(); + trace!("Spawning get_register for {xorname:?}"); + let _handle = tokio::spawn(async move { + let task_result = match InnerUploader::get_register(client, reg_addr).await { + Ok(register) => { + debug!("Register retrieved for {xorname:?}"); + TaskResult::GetRegisterFromNetworkOk { + remote_register: register, + } + } + Err(err) => { + // todo match on error to only skip if GetRecordError + warn!("Encountered error {err:?} during get_register. The register has to be PUT as it is a new one."); + TaskResult::GetRegisterFromNetworkErr(xorname) + } + }; + let _ = task_result_sender.send(task_result).await; + }); + } + + fn submit_push_register_task( + &mut self, + client: Client, + upload_item: UploadItem, + verify_store: bool, + task_result_sender: mpsc::Sender, + ) { + let xorname = upload_item.xorname(); + trace!("Spawning push_register for {xorname:?}"); + let _handle = tokio::spawn(async move { + let task_result = match InnerUploader::push_register(client, upload_item, verify_store) + .await + { + Ok(reg) => { + debug!("Register pushed: {xorname:?}"); + TaskResult::PushRegisterOk { + updated_register: reg, + } + } + Err(err) => { + // todo match on error to only skip if GetRecordError + error!("Encountered error {err:?} during push_register. 
The register might not be present in the network"); + TaskResult::PushRegisterErr(xorname) + } + }; + let _ = task_result_sender.send(task_result).await; + }); + } + + fn submit_make_payment_task( + &mut self, + to_send: Option<(UploadItem, Box)>, + make_payment_sender: mpsc::Sender)>>, + ) { + let _handle = tokio::spawn(async move { + let _ = make_payment_sender.send(to_send).await; + }); + } + + fn submit_upload_item_task( + &mut self, + upload_item: UploadItem, + client: Client, + previous_payments: Option<&Vec>, + verify_store: bool, + retry_strategy: RetryStrategy, + task_result_sender: mpsc::Sender, + ) { + trace!("Spawning upload item task for {:?}", upload_item.xorname()); + + let last_payment = previous_payments.and_then(|payments| payments.last().cloned()); + + let _handle = tokio::spawn(async move { + let xorname = upload_item.xorname(); + let result = InnerUploader::upload_item( + client, + upload_item, + last_payment, + verify_store, + retry_strategy, + ) + .await; + + trace!("Upload item {xorname:?} uploaded with result {result:?}"); + match result { + Ok(_) => { + let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; + } + Err(_) => { + let _ = task_result_sender + .send(TaskResult::UploadErr { xorname }) + .await; + } + }; + }); + } +} + +/// `Uploader` provides functionality for uploading both Chunks and Registers with support for retries and queuing. +/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. +/// To modify the configuration, use the provided setter methods (`set_...` functions). +#[derive(custom_debug::Debug)] +pub(super) struct InnerUploader { + pub(super) cfg: UploadCfg, + #[debug(skip)] + pub(super) client: Client, + #[debug(skip)] + pub(super) wallet: EvmWallet, + + // states + pub(super) all_upload_items: HashMap, + pub(super) pending_to_get_register: Vec, + pub(super) pending_to_push_register: Vec, + pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>, + pub(super) pending_to_pay: Vec<(XorName, Box)>, + pub(super) pending_to_upload: Vec, + pub(super) payment_proofs: HashMap>, + + // trackers + pub(super) on_going_get_register: HashSet, + pub(super) on_going_push_register: HashSet, + pub(super) on_going_get_cost: HashSet, + pub(super) on_going_payments: HashSet, + pub(super) on_going_uploads: HashSet, + + // error trackers + pub(super) n_errors_during_uploads: HashMap, + pub(super) push_register_errors: usize, + pub(super) get_store_cost_errors: usize, + pub(super) make_payments_errors: usize, + + // Upload summary + pub(super) tokens_spent: Amount, + pub(super) upload_final_balance: Amount, + pub(super) max_repayments_reached: HashSet, + pub(super) uploaded_addresses: HashSet, + pub(super) uploaded_registers: HashMap, + pub(super) uploaded_count: usize, + pub(super) skipped_count: usize, + + // Task channels for testing. Not used in actual code. 
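+    // (A rough sketch of how tests wire this up -- based on the test setup added
+    // later in this series; `tx`/`rx` are illustrative names:
+    //     let (tx, rx) = mpsc::channel(100);
+    //     inner.testing_task_channels = Some((tx.clone(), rx));
+    // The retained `tx` lets a scripted UploaderInterface impl feed TaskResults
+    // straight back into the main upload loop.)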
+    pub(super) testing_task_channels:
+        Option<(mpsc::Sender<TaskResult>, mpsc::Receiver<TaskResult>)>,
+
+    // Public events
+    #[debug(skip)]
+    pub(super) logged_event_sender_absence: bool,
+    #[debug(skip)]
+    pub(super) event_sender: Option<mpsc::Sender<UploadEvent>>,
+}
+
+impl InnerUploader {
+    pub(super) fn new(client: Client, wallet: EvmWallet) -> Self {
+        Self {
+            cfg: Default::default(),
+            client,
+            wallet,
+
+            all_upload_items: Default::default(),
+            pending_to_get_register: Default::default(),
+            pending_to_push_register: Default::default(),
+            pending_to_get_store_cost: Default::default(),
+            pending_to_pay: Default::default(),
+            pending_to_upload: Default::default(),
+            payment_proofs: Default::default(),
+
+            on_going_get_register: Default::default(),
+            on_going_push_register: Default::default(),
+            on_going_get_cost: Default::default(),
+            on_going_payments: Default::default(),
+            on_going_uploads: Default::default(),
+
+            n_errors_during_uploads: Default::default(),
+            push_register_errors: Default::default(),
+            get_store_cost_errors: Default::default(),
+            max_repayments_reached: Default::default(),
+            make_payments_errors: Default::default(),
+
+            tokens_spent: Amount::from(0),
+            upload_final_balance: Amount::from(0),
+            uploaded_addresses: Default::default(),
+            uploaded_registers: Default::default(),
+            uploaded_count: Default::default(),
+            skipped_count: Default::default(),
+
+            testing_task_channels: None,
+            logged_event_sender_absence: Default::default(),
+            event_sender: Default::default(),
+        }
+    }
+
+    // ====== Pop items ======
+
+    fn pop_item_for_push_register(&mut self) -> Result<UploadItem> {
+        if let Some(name) = self.pending_to_push_register.pop() {
+            let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
+                error!("Uploadable item not found in all_upload_items: {name:?}");
+                UploadError::InternalError
+            })?;
+            Ok(upload_item)
+        } else {
+            // The caller ensures this does not happen.
+            error!("No item found for push register");
+            Err(UploadError::InternalError)
+        }
+    }
+
+    fn pop_item_for_get_store_cost(
+        &mut self,
+    ) -> Result<(XorName, NetworkAddress, GetStoreCostStrategy)> {
+        let (xorname, strategy) = self.pending_to_get_store_cost.pop().ok_or_else(|| {
+            error!("No item found for get store cost");
+            UploadError::InternalError
+        })?;
+        let address = self
+            .all_upload_items
+            .get(&xorname)
+            .map(|item| item.address())
+            .ok_or_else(|| {
+                error!("Uploadable item not found in all_upload_items: {xorname:?}");
+                UploadError::InternalError
+            })?;
+        Ok((xorname, address, strategy))
+    }
+
+    fn pop_item_for_make_payment(&mut self) -> Result<(UploadItem, Box<PayeeQuote>)> {
+        if let Some((name, quote)) = self.pending_to_pay.pop() {
+            let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
+                error!("Uploadable item not found in all_upload_items: {name:?}");
+                UploadError::InternalError
+            })?;
+            Ok((upload_item, quote))
+        } else {
+            // The caller ensures this does not happen.
+            error!("No item found for make payment");
+            Err(UploadError::InternalError)
+        }
+    }
+
+    fn pop_item_for_upload_item(&mut self) -> Result<UploadItem> {
+        if let Some(name) = self.pending_to_upload.pop() {
+            let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
+                error!("Uploadable item not found in all_upload_items: {name:?}");
+                UploadError::InternalError
+            })?;
+            Ok(upload_item)
+        } else {
+            // The caller ensures this does not happen.
+ error!("No item found for upload item"); + Err(UploadError::InternalError) + } + } + + // ====== Processing Loop ====== + + // This is spawned as a long running task to prevent us from reading the wallet files + // each time we have to make a payment. + fn start_payment_processing_thread( + &self, + mut make_payment_receiver: mpsc::Receiver)>>, + task_result_sender: mpsc::Sender, + payment_batch_size: usize, + ) -> Result<()> { + let wallet = self.wallet.clone(); + + let _handle = tokio::spawn(async move { + debug!("Spawning the long running make payment processing loop."); + + let mut to_be_paid_list = Vec::new(); + let mut cost_map = HashMap::new(); + + let mut got_a_previous_force_payment = false; + while let Some(payment) = make_payment_receiver.recv().await { + let make_payments = if let Some((item, quote)) = payment { + to_be_paid_list.push(( + quote.2.hash(), + quote.2.rewards_address, + quote.2.cost.as_atto(), + )); + let xorname = item.xorname(); + debug!("Inserted {xorname:?} into to_be_paid_list"); + + let _ = cost_map.insert(xorname, (quote.0, quote.1, quote.2)); + cost_map.len() >= payment_batch_size || got_a_previous_force_payment + } else { + // using None to indicate as all paid. + let make_payments = !cost_map.is_empty(); + debug!("Got a forced forced round of make payment."); + // Note: There can be a mismatch of ordering between the main loop and the make payment loop because + // the instructions are sent via a task(channel.send().await). And there is no guarantee for the + // order to come in the same order as they were sent. + // + // We cannot just disobey the instruction inside the child loop, as the mainloop would be expecting + // a result back for a particular instruction. + if !make_payments { + got_a_previous_force_payment = true; + warn!( + "We were told to force make payment, but cost_map is empty, so we can't do that just yet. Waiting for a task to insert a quote into cost_map" + ) + } + + make_payments + }; + + if make_payments { + // reset force_make_payment + if got_a_previous_force_payment { + info!("A task inserted a quote into cost_map, so we can now make a forced round of payment!"); + got_a_previous_force_payment = false; + } + + let terminate_process = false; + let data_payments = std::mem::take(&mut to_be_paid_list); + + let result = match wallet.pay_for_quotes(data_payments).await { + Ok(payments) => { + trace!("Made payments for {} records.", payments.len()); + + let payment_proofs = + payment_proof_from_quotes_and_payments(&cost_map, &payments); + + TaskResult::MakePaymentsOk { payment_proofs } + } + // TODO: Don't allow > 1 batch. + Err(err) => { + let error = err.0; + let _succeeded_batch = err.1; + + error!("When paying {} data, got error {error:?}", cost_map.len(),); + // TODO: match on insufficient gas/token error. and set terminate_process = true + TaskResult::MakePaymentsErr { + failed_xornames: cost_map + .into_iter() + .map(|(k, v)| (k, Box::new(v))) + .collect(), + } + } + }; + let pay_for_chunk_sender_clone = task_result_sender.clone(); + let _handle = tokio::spawn(async move { + let _ = pay_for_chunk_sender_clone.send(result).await; + }); + + cost_map = HashMap::new(); + + if terminate_process { + // The error will trigger the entire upload process to be terminated. + // Hence here we shall terminate the inner loop first, + // to avoid the wallet going further to be potentially got corrupted. + warn!( + "Terminating make payment processing loop due to un-recoverable error." 
+                        );
+                        break;
+                    }
+                }
+            }
+            debug!("Make payment processing loop terminated.");
+        });
+        Ok(())
+    }
+
+    // ====== Logic ======
+
+    async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result<Register> {
+        let reg = client.register_get(reg_addr).await?;
+        Ok(reg)
+    }
+
+    async fn push_register(
+        client: Client,
+        upload_item: UploadItem,
+        verify_store: bool,
+    ) -> Result<Register> {
+        let register = if let UploadItem::Register { reg, .. } = upload_item {
+            reg
+        } else {
+            error!("Invalid upload item found: {upload_item:?}");
+            return Err(UploadError::InternalError);
+        };
+
+        let verification = if verify_store {
+            let get_cfg = GetRecordCfg {
+                get_quorum: Quorum::Majority,
+                retry_strategy: Some(RetryStrategy::default()),
+                target_record: None,
+                expected_holders: Default::default(),
+                is_register: true,
+            };
+            Some((VerificationKind::Network, get_cfg))
+        } else {
+            None
+        };
+
+        let put_cfg = PutRecordCfg {
+            put_quorum: Quorum::All,
+            retry_strategy: None,
+            use_put_record_to: None,
+            verification,
+        };
+
+        client.register_upload(&register, None, &put_cfg).await?;
+
+        Ok(register)
+    }
+
+    async fn get_store_cost(
+        client: Client,
+        xorname: XorName,
+        address: NetworkAddress,
+        get_store_cost_strategy: GetStoreCostStrategy,
+        previous_payments_to: Result<Vec<PeerId>>,
+        max_repayments_for_failed_data: usize,
+    ) -> Result<PayeeQuote> {
+        let filter_list = match get_store_cost_strategy {
+            GetStoreCostStrategy::Cheapest => vec![],
+            GetStoreCostStrategy::SelectDifferentPayee => {
+                let filter_list = previous_payments_to?;
+
+                // If we have already made the initial payment + max_repayments, error out.
+                if Self::have_we_reached_max_repayments(
+                    filter_list.len(),
+                    max_repayments_for_failed_data,
+                ) {
+                    // This error is handled by the caller.
+                    return Err(UploadError::MaximumRepaymentsReached {
+                        items: vec![xorname],
+                        summary: UploadSummary::default(),
+                    });
+                }
+
+                debug!("Filtering out payments from {filter_list:?} during get_store_cost for {xorname:?}");
+                filter_list
+            }
+        };
+        let quote = client
+            .network
+            .get_store_costs_from_network(address, filter_list)
+            .await?;
+        Ok(quote)
+    }
+
+    async fn upload_item(
+        client: Client,
+        upload_item: UploadItem,
+        previous_payments: Option<ProofOfPayment>,
+        _verify_store: bool,
+        _retry_strategy: RetryStrategy,
+    ) -> Result<()> {
+        let xorname = upload_item.xorname();
+
+        let payment_proof = previous_payments.ok_or_else(|| {
+            error!("No payment proof found for {xorname:?}");
+            UploadError::InternalError
+        })?;
+        let payee = payment_proof.to_peer_id_payee().ok_or_else(|| {
+            error!("Invalid payment proof found, could not obtain peer_id {payment_proof:?}");
+            UploadError::InternalError
+        })?;
+
+        debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment_proof:?}");
+
+        match upload_item {
+            UploadItem::Chunk { address: _, chunk } => {
+                let chunk = match chunk {
+                    Either::Left(chunk) => chunk,
+                    Either::Right(path) => {
+                        let bytes = std::fs::read(&path).inspect_err(|err| {
+                            error!("Error reading chunk at {path:?}: {err:?}");
+                        })?;
+                        Chunk::new(Bytes::from(bytes))
+                    }
+                };
+
+                trace!("Client upload started for chunk: {xorname:?}");
+                // TODO: pass in verify_store and retry_strategy (or better, a whole PutRecordCfg). Also the fn has a panic; remove it.
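+                // A hypothetical shape for that cfg, mirroring how push_register()
+                // builds its PutRecordCfg above (the field values here are assumptions,
+                // not the final API):
+                //     let put_cfg = PutRecordCfg {
+                //         put_quorum: Quorum::One,
+                //         retry_strategy: Some(_retry_strategy),
+                //         use_put_record_to: Some(vec![payee]),
+                //         verification: None, // or derived from _verify_store as in push_register()
+                //     };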
+                client
+                    .chunk_upload_with_payment(chunk, payment_proof)
+                    .await?;
+
+                trace!("Client upload completed for chunk: {xorname:?}");
+            }
+            UploadItem::Register { address: _, reg: _ } => {
+                // TODO: create a new fn to perform register_upload_with_payment
+                // reg.publish_register(Some((payment, payee)), verify_store)
+                //     .await?;
+                trace!("Client upload completed for register: {xorname:?}");
+            }
+        }
+
+        Ok(())
+    }
+
+    // ====== Misc ======
+
+    fn emit_upload_event(&mut self, event: UploadEvent) {
+        if let Some(sender) = self.event_sender.as_ref() {
+            let sender_clone = sender.clone();
+            let _handle = tokio::spawn(async move {
+                if let Err(err) = sender_clone.send(event).await {
+                    error!("Error emitting upload event: {err:?}");
+                }
+            });
+        } else if !self.logged_event_sender_absence {
+            info!("FilesUpload upload event sender is not set. Use get_upload_events() if you need to keep track of the progress");
+            self.logged_event_sender_absence = true;
+        }
+    }
+
+    /// If we have already made initial + max_repayments_allowed payments, then we should error out.
+    // Separate function as it is used in tests.
+    pub(super) fn have_we_reached_max_repayments(
+        payments_made: usize,
+        max_repayments_allowed: usize,
+    ) -> bool {
+        // If max_repayments_allowed = 1, then capacity is reached once 2 payments have
+        // been made, i.e., 1 initial + 1 repayment.
+        payments_made > max_repayments_allowed
+    }
+
+    fn validate_upload_cfg(&self) -> Result<()> {
+        if self.cfg.payment_batch_size > PAYMENT_BATCH_SIZE {
+            error!("Payment batch size is greater than the maximum allowed: {PAYMENT_BATCH_SIZE}");
+            return Err(UploadError::InvalidCfg(format!(
+                "Payment batch size is greater than the maximum allowed: {PAYMENT_BATCH_SIZE}"
+            )));
+        }
+        if self.cfg.payment_batch_size < 1 {
+            error!("Payment batch size cannot be less than 1");
+            return Err(UploadError::InvalidCfg(
+                "Payment batch size cannot be less than 1".to_string(),
+            ));
+        }
+        if self.cfg.batch_size < 1 {
+            error!("Batch size cannot be less than 1");
+            return Err(UploadError::InvalidCfg(
+                "Batch size cannot be less than 1".to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+}
diff --git a/autonomi/src/utils.rs b/autonomi/src/utils.rs
index fc9ceb7718..a7273f9bae 100644
--- a/autonomi/src/utils.rs
+++ b/autonomi/src/utils.rs
@@ -1,14 +1,15 @@
-use sn_evm::{PaymentQuote, ProofOfPayment, QuoteHash, TxHash};
+use sn_evm::{ProofOfPayment, QuoteHash, TxHash};
+use sn_networking::PayeeQuote;
 use std::collections::{BTreeMap, HashMap};
 use xor_name::XorName;
 
 pub fn payment_proof_from_quotes_and_payments(
-    quotes: &HashMap<XorName, PaymentQuote>,
+    quotes: &HashMap<XorName, PayeeQuote>,
     payments: &BTreeMap<QuoteHash, TxHash>,
 ) -> HashMap<XorName, ProofOfPayment> {
     quotes
         .iter()
-        .filter_map(|(xor_name, quote)| {
+        .filter_map(|(xor_name, (_, _, quote))| {
             payments.get(&quote.hash()).map(|tx_hash| {
                 (
                     *xor_name,
diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs
index 49956db39e..a68fe4a01e 100644
--- a/sn_evm/src/lib.rs
+++ b/sn_evm/src/lib.rs
@@ -13,6 +13,7 @@ pub use evmlib::common::Address as RewardsAddress;
 pub use evmlib::common::Address as EvmAddress;
 pub use evmlib::common::QuotePayment;
 pub use evmlib::common::{QuoteHash, TxHash};
+pub use evmlib::contract::network_token::Error as EvmNetworkTokenError;
 pub use evmlib::cryptography;
 #[cfg(feature = "external-signer")]
 pub use evmlib::external_signer;

From 985034ae4b217efe771cac3a94cf25cff4b501a6 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 01:44:56 +0530
Subject: [PATCH 079/128] test(autonomi): add tests for uploader

---
 Cargo.lock                           |   2 +
 autonomi/Cargo.toml                  |   3 +
autonomi/src/client/registers.rs | 9 + autonomi/src/uploader/mod.rs | 17 + autonomi/src/uploader/tests.rs | 1 - autonomi/src/uploader/tests/mod.rs | 520 +++++++++++++++++++++++++++ autonomi/src/uploader/tests/setup.rs | 470 ++++++++++++++++++++++++ autonomi/src/uploader/upload.rs | 9 +- 8 files changed, 1027 insertions(+), 4 deletions(-) delete mode 100644 autonomi/src/uploader/tests.rs create mode 100644 autonomi/src/uploader/tests/mod.rs create mode 100644 autonomi/src/uploader/tests/setup.rs diff --git a/Cargo.lock b/Cargo.lock index b0b5441302..834e26f3cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1079,6 +1079,7 @@ name = "autonomi" version = "0.2.1" dependencies = [ "alloy", + "assert_matches", "bip39", "blst", "blstrs 0.7.1", @@ -1110,6 +1111,7 @@ dependencies = [ "sn_peers_acquisition", "sn_protocol", "sn_registers", + "tempfile", "test_utils", "thiserror", "tiny_http", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 37de39675a..324c3c6979 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -61,10 +61,13 @@ blstrs = "0.7.1" [dev-dependencies] alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +assert_matches = "1.5.0" eyre = "0.6.5" sha2 = "0.10.6" sn_logging = { path = "../sn_logging", version = "0.2.37" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_registers = { path = "../sn_registers", version = "0.4.0", features = ["test-utils"] } +tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. test_utils = { path = "../test_utils" } diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index f7e14c0e09..ea47d363fb 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -150,6 +150,15 @@ impl Register { Ok(()) } + + #[cfg(test)] + pub(crate) fn test_new_from_register(signed_reg: SignedRegister) -> Register { + let crdt_reg = RegisterCrdt::new(*signed_reg.address()); + Register { + signed_reg, + crdt_reg, + } + } } impl Client { diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs index 2677a10b17..b7ad7039a0 100644 --- a/autonomi/src/uploader/mod.rs +++ b/autonomi/src/uploader/mod.rs @@ -221,6 +221,19 @@ impl Uploader { .set_batch_size(batch_size); } + /// Sets the default payment batch size that determines the number of payments that are made in a single + /// transaction. The maximum number of payments that can be made in a single transaction is 512. + /// + /// By default, this option is set to the constant `PAYMENT_BATCH_SIZE: usize = 512`. + pub fn set_payment_batch_size(&mut self, payment_batch_size: usize) { + // Self can only be constructed with new(), which will set inner to InnerUploader always. + // So it is okay to call unwrap here. + self.inner + .as_mut() + .expect("Uploader::new makes sure inner is present") + .set_payment_batch_size(payment_batch_size); + } + /// Sets the option to verify the data after they have been uploaded. /// /// By default, this option is set to true. 
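For a sense of how these setters compose, here is a minimal caller-side sketch; the `Uploader::new` signature is assumed to mirror `InnerUploader::new` shown later in this patch, and the concrete values are illustrative:

    let mut uploader = Uploader::new(client, wallet);
    uploader.set_batch_size(64);          // number of items processed per batch
    uploader.set_payment_batch_size(256); // must be in 1..=PAYMENT_BATCH_SIZE (512)
    uploader.set_verify_store(true);      // verify data after upload (the default)
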
@@ -371,6 +384,10 @@ impl InnerUploader { self.cfg.batch_size = batch_size; } + pub(super) fn set_payment_batch_size(&mut self, payment_batch_size: usize) { + self.cfg.payment_batch_size = payment_batch_size; + } + pub(super) fn set_verify_store(&mut self, verify_store: bool) { self.cfg.verify_store = verify_store; } diff --git a/autonomi/src/uploader/tests.rs b/autonomi/src/uploader/tests.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/autonomi/src/uploader/tests.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/autonomi/src/uploader/tests/mod.rs b/autonomi/src/uploader/tests/mod.rs new file mode 100644 index 0000000000..9c32c5af1e --- /dev/null +++ b/autonomi/src/uploader/tests/mod.rs @@ -0,0 +1,520 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +mod setup; + +use crate::uploader::{ + tests::setup::{ + get_dummy_chunk_paths, get_dummy_registers, get_inner_uploader, start_uploading_with_steps, + TestSteps, + }, + UploadError, UploadEvent, +}; +use assert_matches::assert_matches; +use bls::SecretKey; +use eyre::Result; +use sn_logging::LogBuilder; +use std::collections::VecDeque; +use tempfile::tempdir; + +// ===== HAPPY PATH ======= + +/// 1. Chunk: if cost =0, then chunk is present in the network. +#[tokio::test] +async fn chunk_that_already_exists_in_the_network_should_return_zero_store_cost() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![TestSteps::GetStoreCostOk { + trigger_zero_cost: true, + assert_select_different_payee: false, + }]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 1); + assert_matches!(events[0], UploadEvent::ChunkAlreadyExistsInNetwork(_)); + Ok(()) +} + +/// 2. Chunk: if cost !=0, then make payment upload to the network. 
+#[tokio::test] +async fn chunk_should_be_paid_for_and_uploaded_if_cost_is_not_zero() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![ + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemOk, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 2); + assert_matches!(events[0], UploadEvent::PaymentMade { .. }); + assert_matches!(events[1], UploadEvent::ChunkUploaded(..)); + Ok(()) +} + +/// 3. Register: if GET register = ok, then merge and push the register. +#[tokio::test] +async fn register_should_be_merged_and_pushed_if_it_already_exists_in_the_network() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + let steps = vec![TestSteps::GetRegisterOk, TestSteps::PushRegisterOk]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 1); + assert_matches!(events[0], UploadEvent::RegisterUpdated { .. }); + Ok(()) +} + +/// 4. Register: if Get register = err, then get store cost and upload. +#[tokio::test] +async fn register_should_be_paid_and_uploaded_if_it_does_not_exists() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + // todo: what if cost = 0 even after GetRegister returns error. check that + let steps = vec![ + TestSteps::GetRegisterErr, + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemOk, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 2); + assert_matches!(events[0], UploadEvent::PaymentMade { .. }); + assert_matches!(events[1], UploadEvent::RegisterUploaded(..)); + Ok(()) +} + +// ===== REPAYMENTS ====== + +/// 1. Chunks: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy +/// and then uploaded. 
+#[tokio::test] +async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![ + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemErr, + TestSteps::UploadItemErr, + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: true, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemOk, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 3); + assert_matches!(events[0], UploadEvent::PaymentMade { .. }); + assert_matches!(events[1], UploadEvent::PaymentMade { .. }); + assert_matches!(events[2], UploadEvent::ChunkUploaded(..)); + Ok(()) +} + +/// 2. Register: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy +/// and then uploaded. +#[tokio::test] +async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + let steps = vec![ + TestSteps::GetRegisterErr, + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemErr, + TestSteps::UploadItemErr, + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: true, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemOk, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + let _stats = upload_handle.await??; + let events = events_handle.await?; + + assert_eq!(events.len(), 3); + assert_matches!(events[0], UploadEvent::PaymentMade { .. }); + assert_matches!(events[1], UploadEvent::PaymentMade { .. }); + assert_matches!(events[2], UploadEvent::RegisterUploaded(..)); + Ok(()) +} + +// ===== ERRORS ======= +/// 1. 
Registers: Multiple PushRegisterErr should result in Error::SequentialNetworkErrors +#[tokio::test] +async fn register_upload_should_error_out_if_there_are_multiple_push_failures() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + let steps = vec![ + TestSteps::GetRegisterOk, + TestSteps::PushRegisterErr, + TestSteps::PushRegisterErr, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::SequentialNetworkErrors) + ); + let events = events_handle.await?; + + // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. + assert_eq!(events.len(), 0); + Ok(()) +} + +/// 2. Chunk: Multiple errors during get store cost should result in Error::SequentialNetworkErrors +#[tokio::test] +async fn chunk_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![ + TestSteps::GetStoreCostErr { + assert_select_different_payee: false, + }, + TestSteps::GetStoreCostErr { + assert_select_different_payee: false, + }, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::SequentialNetworkErrors) + ); + let events = events_handle.await?; + + // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. + assert_eq!(events.len(), 0); + Ok(()) +} + +/// 3. Register: Multiple errors during get store cost should result in Error::SequentialNetworkErrors +#[tokio::test] +async fn register_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> +{ + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + let steps = vec![ + TestSteps::GetRegisterErr, + TestSteps::GetStoreCostErr { + assert_select_different_payee: false, + }, + TestSteps::GetStoreCostErr { + assert_select_different_payee: false, + }, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::SequentialNetworkErrors) + ); + let events = events_handle.await?; + + // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. 
+ assert_eq!(events.len(), 0); + Ok(()) +} + +/// 4. Chunk: Multiple errors during make payment should result in Error::SequentialUploadPaymentError +#[tokio::test] +async fn chunk_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![ + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentErr, + TestSteps::MakePaymentErr, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::SequentialUploadPaymentError) + ); + let events = events_handle.await?; + + // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. + assert_eq!(events.len(), 0); + Ok(()) +} + +/// 5. Register: Multiple errors during make payment should result in Error::SequentialUploadPaymentError +#[tokio::test] +async fn register_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> +{ + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + let register_sk = SecretKey::random(); + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_register(get_dummy_registers(1, ®ister_sk)); + + // the path to test + let steps = vec![ + TestSteps::GetRegisterErr, + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentErr, + TestSteps::MakePaymentErr, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + register_sk, + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::SequentialUploadPaymentError) + ); + let events = events_handle.await?; + + // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. + assert_eq!(events.len(), 0); + Ok(()) +} + +// 6: Chunks + Registers: if the number of repayments exceed a threshold, it should return MaximumRepaymentsReached error. 
+#[tokio::test] +async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); + let temp_dir = tempdir()?; + let (mut inner_uploader, task_result_rx) = get_inner_uploader()?; + + // cfg + inner_uploader.set_batch_size(1); + inner_uploader.set_payment_batch_size(1); + inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); + + // the path to test + let steps = vec![ + // initial payment done + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: false, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemErr, + TestSteps::UploadItemErr, + // first repayment + TestSteps::GetStoreCostOk { + trigger_zero_cost: false, + assert_select_different_payee: true, + }, + TestSteps::MakePaymentOk, + TestSteps::UploadItemErr, + TestSteps::UploadItemErr, + // thus after reaching max repayments, we should error out during get store cost. + TestSteps::GetStoreCostErr { + assert_select_different_payee: true, + }, + ]; + + let (upload_handle, events_handle) = start_uploading_with_steps( + inner_uploader, + VecDeque::from(steps), + SecretKey::random(), + task_result_rx, + ); + + assert_matches!( + upload_handle.await?, + Err(UploadError::MaximumRepaymentsReached { .. }) + ); + let events = events_handle.await?; + + assert_eq!(events.len(), 2); + assert_matches!(events[0], UploadEvent::PaymentMade { .. }); + assert_matches!(events[1], UploadEvent::PaymentMade { .. }); + Ok(()) +} diff --git a/autonomi/src/uploader/tests/setup.rs b/autonomi/src/uploader/tests/setup.rs new file mode 100644 index 0000000000..53e628b99d --- /dev/null +++ b/autonomi/src/uploader/tests/setup.rs @@ -0,0 +1,470 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
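+
+// Test-harness overview: `TestUploader` below implements `UploaderInterface`,
+// but instead of touching the network, each `submit_*` method pops the next
+// scripted `TestSteps` entry and replies with a canned `TaskResult` over an
+// injected channel, letting the tests drive the upload state machine
+// deterministically.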
+ +use crate::{ + client::registers::Register, + uploader::{ + upload::{start_upload, InnerUploader}, + GetStoreCostStrategy, TaskResult, UploadError, UploadEvent, UploadItem, UploadSummary, + UploaderInterface, + }, + Client, +}; +use alloy::{primitives::TxHash, signers::local::PrivateKeySigner}; +use assert_matches::assert_matches; +use bls::SecretKey as BlsSecretKey; +use eyre::Result; +use libp2p::{identity::Keypair, PeerId}; +use rand::thread_rng; +use sn_evm::{EvmNetwork, EvmWallet, PaymentQuote, ProofOfPayment}; +use sn_networking::{NetworkBuilder, PayeeQuote}; +use sn_protocol::{storage::RetryStrategy, NetworkAddress}; +use sn_registers::{RegisterAddress, SignedRegister}; +use std::{ + collections::{BTreeMap, HashMap, VecDeque}, + path::PathBuf, + sync::Arc, +}; +use tokio::{runtime::Handle, sync::mpsc, task::JoinHandle}; +use xor_name::XorName; + +struct TestUploader { + inner: Option, + test_steps: VecDeque, + task_result_sender: mpsc::Sender, + + // test states + make_payment_collector: Vec<(XorName, Box)>, + payments_made_per_xorname: BTreeMap, + payment_batch_size: usize, + register_sk: BlsSecretKey, +} + +impl UploaderInterface for TestUploader { + fn take_inner_uploader(&mut self) -> InnerUploader { + self.inner.take().unwrap() + } + + fn submit_get_register_task( + &mut self, + _client: Client, + reg_addr: RegisterAddress, + _task_result_sender: mpsc::Sender, + ) { + let xorname = reg_addr.xorname(); + let step = self + .test_steps + .pop_front() + .expect("TestSteps are empty. Expected a GetRegister step."); + let handle = Handle::current(); + let register_sk = self.register_sk.clone(); + let task_result_sender = self.task_result_sender.clone(); + + println!("spawn_get_register called for: {xorname:?}. Step to execute: {step:?}"); + info!("TEST: spawn_get_register called for: {xorname:?}. Step to execute: {step:?}"); + match step { + TestSteps::GetRegisterOk => { + handle.spawn(async move { + let remote_register = + SignedRegister::test_new_from_address(reg_addr, ®ister_sk); + let remote_register = Register::test_new_from_register(remote_register); + task_result_sender + .send(TaskResult::GetRegisterFromNetworkOk { remote_register }) + .await + .expect("Failed to send task result"); + }); + } + TestSteps::GetRegisterErr => { + handle.spawn(async move { + task_result_sender + .send(TaskResult::GetRegisterFromNetworkErr(xorname)) + .await + .expect("Failed to send task result"); + }); + } + con => panic!("Test failed: Expected GetRegister step. Got: {con:?}"), + } + } + + fn submit_push_register_task( + &mut self, + _client: Client, + upload_item: UploadItem, + _verify_store: bool, + _task_result_sender: mpsc::Sender, + ) { + let xorname = upload_item.xorname(); + let step = self + .test_steps + .pop_front() + .expect("TestSteps are empty. Expected a PushRegister step."); + let handle = Handle::current(); + let task_result_sender = self.task_result_sender.clone(); + + println!("spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); + info!("TEST: spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); + match step { + TestSteps::PushRegisterOk => { + handle.spawn(async move { + let updated_register = match upload_item { + UploadItem::Register { reg, .. } => reg, + _ => panic!("Expected UploadItem::Register"), + }; + task_result_sender + .send(TaskResult::PushRegisterOk { + // this register is just used for returning. 
+ updated_register, + }) + .await + .expect("Failed to send task result"); + }); + } + TestSteps::PushRegisterErr => { + handle.spawn(async move { + task_result_sender + .send(TaskResult::PushRegisterErr(xorname)) + .await + .expect("Failed to send task result"); + }); + } + con => panic!("Test failed: Expected PushRegister step. Got: {con:?}"), + } + } + + fn submit_get_store_cost_task( + &mut self, + _client: Client, + xorname: XorName, + _address: NetworkAddress, + _previous_payments: Option<&Vec>, + get_store_cost_strategy: GetStoreCostStrategy, + max_repayments_for_failed_data: usize, + _task_result_sender: mpsc::Sender, + ) { + let step = self + .test_steps + .pop_front() + .expect("TestSteps are empty. Expected a GetStoreCost step."); + let handle = Handle::current(); + let task_result_sender = self.task_result_sender.clone(); + + println!("spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); + info!("TEST: spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); + + let has_max_payments_reached_closure = + |get_store_cost_strategy: &GetStoreCostStrategy| -> bool { + match get_store_cost_strategy { + GetStoreCostStrategy::SelectDifferentPayee => { + if let Some(n_payments) = self.payments_made_per_xorname.get(&xorname) { + InnerUploader::have_we_reached_max_repayments( + *n_payments, + max_repayments_for_failed_data, + ) + } else { + false + } + } + _ => false, + } + }; + + // if select different payee, then it can possibly error out if max_repayments have been reached. + // then the step should've been a GetStoreCostErr. + if has_max_payments_reached_closure(&get_store_cost_strategy) { + assert_matches!(step, TestSteps::GetStoreCostErr { .. }, "Max repayments have been reached, so we expect a GetStoreCostErr, not GetStoreCostOk"); + } + + match step { + TestSteps::GetStoreCostOk { + trigger_zero_cost, + assert_select_different_payee, + } => { + // Make sure that the received strategy is the one defined in the step. + assert!(match get_store_cost_strategy { + // match here to not miss out on any new strategies. + GetStoreCostStrategy::Cheapest => !assert_select_different_payee, + GetStoreCostStrategy::SelectDifferentPayee { .. } => + assert_select_different_payee, + }); + + let mut quote = PaymentQuote::zero(); + if !trigger_zero_cost { + quote.cost = 1.into(); + } + handle.spawn(async move { + task_result_sender + .send(TaskResult::GetStoreCostOk { + xorname, + quote: Box::new(( + PeerId::random(), + PrivateKeySigner::random().address(), + quote, + )), + }) + .await + .expect("Failed to send task result"); + }); + } + TestSteps::GetStoreCostErr { + assert_select_different_payee, + } => { + // Make sure that the received strategy is the one defined in the step. + assert!(match get_store_cost_strategy { + // match here to not miss out on any new strategies. + GetStoreCostStrategy::Cheapest => !assert_select_different_payee, + GetStoreCostStrategy::SelectDifferentPayee { .. } => + assert_select_different_payee, + }); + let max_repayments_reached = + has_max_payments_reached_closure(&get_store_cost_strategy); + + handle.spawn(async move { + task_result_sender + .send(TaskResult::GetStoreCostErr { + xorname, + get_store_cost_strategy, + max_repayments_reached, + }) + .await + .expect("Failed to send task result"); + }); + } + con => panic!("Test failed: Expected GetStoreCost step. 
Got: {con:?}"), + } + } + + fn submit_make_payment_task( + &mut self, + to_send: Option<(UploadItem, Box)>, + _make_payment_sender: mpsc::Sender)>>, + ) { + let step = self + .test_steps + .pop_front() + .expect("TestSteps are empty. Expected a MakePayment step."); + let handle = Handle::current(); + let task_result_sender = self.task_result_sender.clone(); + match &to_send { + Some((upload_item, quote)) => { + let xorname = upload_item.xorname(); + println!("spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}"); + info!( + "TEST: spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}" + ); + + self.make_payment_collector + .push((upload_item.xorname(), quote.clone())); + } + None => { + println!( + "spawn_make_payment called with force make payment. Step to execute: {step:?}" + ); + info!("TEST: spawn_make_payment called with force make payment. Step to execute: {step:?}"); + } + } + + // gotta collect batch size before sending task result. + let _make_payment = self.make_payment_collector.len() >= self.payment_batch_size + || (to_send.is_none() && !self.make_payment_collector.is_empty()); + + match step { + // TestSteps::MakePaymentJustCollectItem => { + // // The test expected for us to just collect item, but if the logic wants us to make payment, then it as + // // error + // assert!(!make_payment); + // } + TestSteps::MakePaymentOk => { + let payment_proofs = std::mem::take(&mut self.make_payment_collector) + .into_iter() + .map(|(xorname, _)| { + ( + xorname, + ProofOfPayment { + quote: PaymentQuote::zero(), + tx_hash: TxHash::repeat_byte(0), + }, + ) + }) + .collect::>(); + // track the payments per xorname + for xorname in payment_proofs.keys() { + let entry = self.payments_made_per_xorname.entry(*xorname).or_insert(0); + *entry += 1; + } + + handle.spawn(async move { + task_result_sender + .send(TaskResult::MakePaymentsOk { payment_proofs }) + .await + .expect("Failed to send task result"); + }); + } + TestSteps::MakePaymentErr => { + let failed_xornames = std::mem::take(&mut self.make_payment_collector); + + handle.spawn(async move { + task_result_sender + .send(TaskResult::MakePaymentsErr { failed_xornames }) + .await + .expect("Failed to send task result"); + }); + } + con => panic!("Test failed: Expected MakePayment step. Got: {con:?}"), + } + } + + fn submit_upload_item_task( + &mut self, + upload_item: UploadItem, + _client: Client, + _previous_payments: Option<&Vec>, + _verify_store: bool, + _retry_strategy: RetryStrategy, + _task_result_sender: mpsc::Sender, + ) { + let xorname = upload_item.xorname(); + let step = self + .test_steps + .pop_front() + .expect("TestSteps are empty. Expected a UploadItem step."); + let handle = Handle::current(); + let task_result_sender = self.task_result_sender.clone(); + + println!("spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); + info!("TEST: spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); + match step { + TestSteps::UploadItemOk => { + handle.spawn(async move { + task_result_sender + .send(TaskResult::UploadOk(xorname)) + .await + .expect("Failed to send task result"); + }); + } + TestSteps::UploadItemErr => { + handle.spawn(async move { + task_result_sender + .send(TaskResult::UploadErr { xorname }) + .await + .expect("Failed to send task result"); + }); + } + con => panic!("Test failed: Expected UploadItem step. 
Got: {con:?}"), + } + } +} + +#[derive(Debug, Clone)] +pub enum TestSteps { + GetRegisterOk, + GetRegisterErr, + PushRegisterOk, + PushRegisterErr, + GetStoreCostOk { + trigger_zero_cost: bool, + assert_select_different_payee: bool, + }, + GetStoreCostErr { + assert_select_different_payee: bool, + }, + // MakePaymentJustCollectItem, + MakePaymentOk, + MakePaymentErr, + UploadItemOk, + UploadItemErr, +} + +pub fn get_inner_uploader() -> Result<(InnerUploader, mpsc::Sender)> { + let client = build_unconnected_client()?; + + let mut inner = InnerUploader::new( + client, + EvmWallet::new_with_random_wallet(EvmNetwork::new_custom( + "http://localhost:63319/", + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )), + ); + let (task_result_sender, task_result_receiver) = mpsc::channel(100); + inner.testing_task_channels = Some((task_result_sender.clone(), task_result_receiver)); + + Ok((inner, task_result_sender)) +} + +// Spawns two tasks. One is the actual upload task that will return an UploadStat when completed. +// The other is a one to collect all the UploadEvent emitted by the previous task. +pub fn start_uploading_with_steps( + mut inner_uploader: InnerUploader, + test_steps: VecDeque, + register_sk: BlsSecretKey, + task_result_sender: mpsc::Sender, +) -> ( + JoinHandle>, + JoinHandle>, +) { + let payment_batch_size = inner_uploader.cfg.payment_batch_size; + let mut upload_event_rx = inner_uploader.get_event_receiver(); + + let upload_handle = tokio::spawn(start_upload(Box::new(TestUploader { + inner: Some(inner_uploader), + test_steps, + task_result_sender, + make_payment_collector: Default::default(), + payments_made_per_xorname: Default::default(), + payment_batch_size, + register_sk, + }))); + + let event_handle = tokio::spawn(async move { + let mut events = vec![]; + while let Some(event) = upload_event_rx.recv().await { + events.push(event); + } + events + }); + + (upload_handle, event_handle) +} + +// Collect all the upload events into a list + +// Build a very simple client struct for testing. This does not connect to any network. +// The UploaderInterface eliminates the need for direct networking in tests. +pub fn build_unconnected_client() -> Result { + let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), true); + let (network, ..) = network_builder.build_client()?; + let client = Client { + network, + client_event_sender: Arc::new(None), + }; + Ok(client) +} + +// We don't perform any networking, so the paths can be dummy ones. 
+pub fn get_dummy_chunk_paths(num: usize, temp_dir: PathBuf) -> Vec<(XorName, PathBuf)> { + let mut rng = thread_rng(); + let mut chunks = Vec::with_capacity(num); + for _ in 0..num { + chunks.push((XorName::random(&mut rng), temp_dir.clone())); + } + chunks +} + +pub fn get_dummy_registers(num: usize, register_sk: &BlsSecretKey) -> Vec { + let mut rng = thread_rng(); + let mut registers = Vec::with_capacity(num); + for _ in 0..num { + // test_new_from_address that is used during get_register, + // uses AnyoneCanWrite permission, so use the same here + let address = RegisterAddress::new(XorName::random(&mut rng), register_sk.public_key()); + let base_register = SignedRegister::test_new_from_address(address, register_sk); + let register = Register::test_new_from_register(base_register); + registers.push(register); + } + registers +} diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs index 665396430e..b07fe53492 100644 --- a/autonomi/src/uploader/upload.rs +++ b/autonomi/src/uploader/upload.rs @@ -114,15 +114,18 @@ pub(super) async fn start_upload( // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times. if uploader.all_upload_items.is_empty() { debug!("Upload items are empty, exiting main upload loop."); - // To avoid empty final_balance when all items are skipped. - uploader.upload_final_balance = - uploader + + // To avoid empty final_balance when all items are skipped. Skip for tests. + #[cfg(not(test))] + { + uploader.upload_final_balance = uploader .wallet .balance_of_tokens() .await .inspect_err(|err| { error!("Failed to get wallet balance: {err:?}"); })?; + } #[cfg(test)] trace!("UPLOADER STATE: finished uploading all items {uploader:?}"); From 5e5aa3f5e1682df189dd2450024328fafb6a724f Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 18 Oct 2024 17:18:47 +0530 Subject: [PATCH 080/128] feat(uploader): error out on io error --- autonomi/src/uploader/mod.rs | 1 + autonomi/src/uploader/tests/mod.rs | 53 +++++++++++++++++++++++----- autonomi/src/uploader/tests/setup.rs | 16 +++++++-- autonomi/src/uploader/upload.rs | 24 +++++++++++-- 4 files changed, 80 insertions(+), 14 deletions(-) diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs index b7ad7039a0..e0617d6964 100644 --- a/autonomi/src/uploader/mod.rs +++ b/autonomi/src/uploader/mod.rs @@ -507,6 +507,7 @@ enum TaskResult { UploadOk(XorName), UploadErr { xorname: XorName, + io_error: Option>, }, } diff --git a/autonomi/src/uploader/tests/mod.rs b/autonomi/src/uploader/tests/mod.rs index 9c32c5af1e..80fb47f415 100644 --- a/autonomi/src/uploader/tests/mod.rs +++ b/autonomi/src/uploader/tests/mod.rs @@ -187,8 +187,8 @@ async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> assert_select_different_payee: false, }, TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, + TestSteps::UploadItemErr { io_error: false }, + TestSteps::UploadItemErr { io_error: false }, TestSteps::GetStoreCostOk { trigger_zero_cost: false, assert_select_different_payee: true, @@ -235,8 +235,8 @@ async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() assert_select_different_payee: false, }, TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, + TestSteps::UploadItemErr { io_error: false }, + TestSteps::UploadItemErr { io_error: false }, TestSteps::GetStoreCostOk { trigger_zero_cost: false, assert_select_different_payee: true, @@ -484,16 +484,16 @@ async fn 
maximum_repayment_error_should_be_triggered_during_get_store_cost() ->
             assert_select_different_payee: false,
         },
         TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr,
-        TestSteps::UploadItemErr,
+        TestSteps::UploadItemErr { io_error: false },
+        TestSteps::UploadItemErr { io_error: false },
         // first repayment
         TestSteps::GetStoreCostOk {
             trigger_zero_cost: false,
             assert_select_different_payee: true,
         },
         TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr,
-        TestSteps::UploadItemErr,
+        TestSteps::UploadItemErr { io_error: false },
+        TestSteps::UploadItemErr { io_error: false },
         // thus after reaching max repayments, we should error out during get store cost.
         TestSteps::GetStoreCostErr {
             assert_select_different_payee: true,
@@ -518,3 +518,40 @@ async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() ->
     assert_matches!(events[1], UploadEvent::PaymentMade { .. });
     Ok(())
 }
+
+// 7. If we get an IO error during upload, the entire upload should error out.
+#[tokio::test]
+async fn io_error_during_upload_should_stop_the_uploads() -> Result<()> {
+    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
+    let temp_dir = tempdir()?;
+    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
+
+    // cfg
+    inner_uploader.set_batch_size(1);
+    inner_uploader.set_payment_batch_size(1);
+    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
+
+    // the path to test
+    let steps = vec![
+        TestSteps::GetStoreCostOk {
+            trigger_zero_cost: false,
+            assert_select_different_payee: false,
+        },
+        TestSteps::MakePaymentOk,
+        TestSteps::UploadItemErr { io_error: true },
+    ];
+
+    let (upload_handle, events_handle) = start_uploading_with_steps(
+        inner_uploader,
+        VecDeque::from(steps),
+        SecretKey::random(),
+        task_result_rx,
+    );
+
+    assert_matches!(upload_handle.await?, Err(UploadError::Io { .. }));
+    let events = events_handle.await?;
+
+    assert_eq!(events.len(), 1);
+    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
+    Ok(())
+}
diff --git a/autonomi/src/uploader/tests/setup.rs b/autonomi/src/uploader/tests/setup.rs
index 53e628b99d..98fe2128a7 100644
--- a/autonomi/src/uploader/tests/setup.rs
+++ b/autonomi/src/uploader/tests/setup.rs
@@ -346,10 +346,18 @@ impl UploaderInterface for TestUploader {
                         .expect("Failed to send task result");
                 });
             }
-            TestSteps::UploadItemErr => {
+            TestSteps::UploadItemErr { io_error } => {
                 handle.spawn(async move {
+                    let io_error = if io_error {
+                        Some(Box::new(std::io::Error::new(
+                            std::io::ErrorKind::Other,
+                            "Test IO Error",
+                        )))
+                    } else {
+                        None
+                    };
                     task_result_sender
-                        .send(TaskResult::UploadErr { xorname })
+                        .send(TaskResult::UploadErr { xorname, io_error })
                         .await
                         .expect("Failed to send task result");
                 });
             }
@@ -376,7 +384,9 @@ pub enum TestSteps {
     MakePaymentOk,
     MakePaymentErr,
     UploadItemOk,
-    UploadItemErr,
+    UploadItemErr {
+        io_error: bool,
+    },
 }
 
 pub fn get_inner_uploader() -> Result<(InnerUploader, mpsc::Sender<TaskResult>)> {
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index b07fe53492..8825223d8c 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -481,9 +481,16 @@ pub(super) async fn start_upload(
                     }
                 }
             }
-            TaskResult::UploadErr { xorname } => {
+            TaskResult::UploadErr { xorname, io_error } => {
+                if let Some(io_error) = io_error {
+                    error!(
+                        "Upload failed for {xorname:?} with error: {io_error:?}. Stopping upload."
+ ); + return Err(UploadError::Io(*io_error)); + } + let _ = uploader.on_going_uploads.remove(&xorname); - trace!("UploadErr for {xorname:?}"); + debug!("UploadErr for {xorname:?}. Keeping track of failure and trying again."); // keep track of the failure let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0); @@ -672,9 +679,20 @@ impl UploaderInterface for Uploader { Ok(_) => { let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; } + Err(UploadError::Io(io_error)) => { + let _ = task_result_sender + .send(TaskResult::UploadErr { + xorname, + io_error: Some(Box::new(io_error)), + }) + .await; + } Err(_) => { let _ = task_result_sender - .send(TaskResult::UploadErr { xorname }) + .send(TaskResult::UploadErr { + xorname, + io_error: None, + }) .await; } }; From a4f3da90ca2d0e2d22262f0e00c866dd4d92e718 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 18 Oct 2024 17:42:27 +0530 Subject: [PATCH 081/128] feat(uploader): upload chunk and registers with correct cfg --- autonomi/src/client/data.rs | 2 +- autonomi/src/client/data_private.rs | 2 +- autonomi/src/client/external_signer.rs | 4 +- autonomi/src/client/utils.rs | 75 +++++++++++---------- autonomi/src/uploader/mod.rs | 10 +-- autonomi/src/uploader/upload.rs | 89 +++++++++++++++++++++---- sn_protocol/src/messages/chunk_proof.rs | 9 +++ 7 files changed, 133 insertions(+), 58 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 869022cd37..c78a35de4e 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -143,7 +143,7 @@ impl Client { let proof_clone = proof.clone(); tasks.spawn(async move { self_clone - .chunk_upload_with_payment(chunk, proof_clone) + .chunk_upload_with_payment(chunk, proof_clone, None) .await .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) }); diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs index b6d0bfa8a3..dc4d109d4b 100644 --- a/autonomi/src/client/data_private.rs +++ b/autonomi/src/client/data_private.rs @@ -88,7 +88,7 @@ impl Client { let proof_clone = proof.clone(); tasks.spawn(async move { self_clone - .chunk_upload_with_payment(chunk, proof_clone) + .chunk_upload_with_payment(chunk, proof_clone, None) .await .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) }); diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs index 69abdd6c20..1d5b87c2e7 100644 --- a/autonomi/src/client/external_signer.rs +++ b/autonomi/src/client/external_signer.rs @@ -57,7 +57,7 @@ impl Client { if let Some(proof) = payment_proofs.get(map_xor_name) { debug!("Uploading data map chunk: {map_xor_name:?}"); - self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) + self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone(), None) .await .inspect_err(|err| error!("Error uploading data map chunk: {err:?}")) } else { @@ -74,7 +74,7 @@ impl Client { for chunk in chunks { if let Some(proof) = payment_proofs.get(chunk.name()) { let address = *chunk.address(); - self.chunk_upload_with_payment(chunk.clone(), proof.clone()) + self.chunk_upload_with_payment(chunk.clone(), proof.clone(), None) .await .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; } diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs index 4e0c0b27c8..f88bb1d1e1 100644 --- a/autonomi/src/client/utils.rs +++ b/autonomi/src/client/utils.rs @@ -6,8 +6,12 @@ // KIND, either express or implied. 
Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-use std::{collections::HashMap, num::NonZero};
-
+use super::{
+    data::{CostError, GetError, PayError, PutError},
+    Client,
+};
+use crate::self_encryption::DataMapLevel;
+use crate::utils::payment_proof_from_quotes_and_payments;
 use bytes::Bytes;
 use libp2p::kad::{Quorum, Record};
 use rand::{thread_rng, Rng};
@@ -21,15 +25,9 @@ use sn_protocol::{
     storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy},
     NetworkAddress,
 };
+use std::{collections::HashMap, num::NonZero};
 use xor_name::XorName;
 
-use super::{
-    data::{CostError, GetError, PayError, PutError},
-    Client,
-};
-use crate::self_encryption::DataMapLevel;
-use crate::utils::payment_proof_from_quotes_and_payments;
-
 impl Client {
     /// Fetch and decrypt all chunks in the data map.
     pub(crate) async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result<Bytes, GetError> {
@@ -89,6 +87,7 @@ impl Client {
         &self,
         chunk: Chunk,
         payment: ProofOfPayment,
+        cfg: Option<PutRecordCfg>,
     ) -> Result<(), PutError> {
         let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID");
 
@@ -110,35 +109,39 @@ impl Client {
             expires: None,
         };
 
-        let verification = {
-            let verification_cfg = GetRecordCfg {
-                get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")),
-                retry_strategy: Some(RetryStrategy::Quick),
-                target_record: None,
-                expected_holders: Default::default(),
-                is_register: false,
-            };
+        let put_cfg = if let Some(cfg) = cfg {
+            cfg
+        } else {
+            let verification = {
+                let verification_cfg = GetRecordCfg {
+                    get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")),
+                    retry_strategy: Some(RetryStrategy::Quick),
+                    target_record: None,
+                    expected_holders: Default::default(),
+                    is_register: false,
+                };
+
+                let random_nonce = thread_rng().gen::<Nonce>();
+                let expected_proof =
+                    ChunkProof::from_chunk(&chunk, random_nonce).map_err(|err| {
+                        PutError::Serialization(format!("Failed to obtain chunk proof: {err:?}"))
+                    })?;
 
-            let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)
-                .map_err(|e| PutError::Serialization(format!("Failed to serialize chunk: {e:?}")))?
-                .to_vec();
-            let random_nonce = thread_rng().gen::<Nonce>();
-            let expected_proof = ChunkProof::new(&stored_on_node, random_nonce);
-
-            Some((
-                VerificationKind::ChunkProof {
-                    expected_proof,
-                    nonce: random_nonce,
-                },
-                verification_cfg,
-            ))
-        };
+                Some((
+                    VerificationKind::ChunkProof {
+                        expected_proof,
+                        nonce: random_nonce,
+                    },
+                    verification_cfg,
+                ))
+            };
 
-        let put_cfg = PutRecordCfg {
-            put_quorum: Quorum::One,
-            retry_strategy: Some(RetryStrategy::Balanced),
-            use_put_record_to: Some(vec![storing_node]),
-            verification,
+            PutRecordCfg {
+                put_quorum: Quorum::One,
+                retry_strategy: Some(RetryStrategy::Balanced),
+                use_put_record_to: Some(vec![storing_node]),
+                verification,
+            }
         };
 
         Ok(self.network.put_record(record, &put_cfg).await?)
}
diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index e0617d6964..df390671d7 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -69,6 +69,8 @@ pub enum UploadError {
     SequentialNetworkErrors,
     #[error("Too many sequential payment errors reported during upload")]
     SequentialUploadPaymentError,
+    #[error("Failed to serialize {0}")]
+    Serialization(String),
     #[error("Network Token error: {0:?}")]
     EvmNetworkTokenError(#[from] EvmNetworkTokenError),
 }
@@ -236,7 +238,7 @@ impl Uploader {
 
     /// Sets the option to verify the data after they have been uploaded.
     ///
-    /// By default, this option is set to true.
+    /// By default, this option is set to `true`.
     pub fn set_verify_store(&mut self, verify_store: bool) {
         self.inner
             .as_mut()
@@ -258,7 +260,7 @@ impl Uploader {
     /// This does not affect the retries during the Payment task. Use `set_max_repayments_for_failed_data` to
     /// configure the re-payment attempts.
     ///
-    /// By default, this option is set to RetryStrategy::Quick
+    /// By default, this option is set to `RetryStrategy::Quick`
     pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) {
         self.inner
             .as_mut()
@@ -269,7 +271,7 @@ impl Uploader {
     /// Sets the maximum number of repayments to perform if the initial payment failed.
     /// NOTE: This creates an extra Spend and uses the wallet funds.
     ///
-    /// By default, this option is set to 1 retry.
+    /// By default, this option is set to `1` retry.
     pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) {
         self.inner
             .as_mut()
@@ -281,7 +283,7 @@ impl Uploader {
     /// The registers are emitted through the event channel whenever they're completed, but this returns them
     /// through the UploadSummary when the whole upload process completes.
     ///
-    /// By default, this option is set to False
+    /// By default, this option is set to `false`
     pub fn set_collect_registers(&mut self, collect_registers: bool) {
         self.inner
             .as_mut()
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index 8825223d8c..1cf0757c0e 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -20,14 +20,19 @@ use crate::{
 use bytes::Bytes;
 use itertools::Either;
 use libp2p::{kad::Quorum, PeerId};
+use rand::{thread_rng, Rng};
 use sn_evm::{Amount, EvmWallet, ProofOfPayment};
 use sn_networking::{GetRecordCfg, PayeeQuote, PutRecordCfg, VerificationKind};
 use sn_protocol::{
+    messages::ChunkProof,
     storage::{Chunk, RetryStrategy},
     NetworkAddress,
 };
 use sn_registers::RegisterAddress;
-use std::collections::{HashMap, HashSet};
+use std::{
+    collections::{HashMap, HashSet},
+    num::NonZero,
+};
 use tokio::sync::mpsc;
 use xor_name::XorName;
 
@@ -56,6 +61,8 @@ type Result<T> = std::result::Result<T, UploadError>;
 // 1. since wallet balance is not fetched after finishing a task, get it before we send OK/Err
 // 2. Rework client event, it should be sent via the lowest level of the PUT. while for chunks it is done earlier (data.rs)
 // 3. track each batch with an id
+// 4. create an irrecoverable error type, so we can bail on io/serialization etc.
+// 5. separate cfgs/retries for register/chunk etc
 // 1. log whenever we insert/remove items. i.e., don't ignore values with `let _`
 
 /// The main loop that performs the upload process.
@@ -925,7 +932,6 @@ impl InnerUploader {
 
                     TaskResult::MakePaymentsOk { payment_proofs }
                 }
-                // TODO: Don't allow > 1 batch.
Err(err) => {
                     let error = err.0;
                     let _succeeded_batch = err.1;
 
@@ -1047,8 +1053,8 @@ impl InnerUploader {
         client: Client,
         upload_item: UploadItem,
         previous_payments: Option<ProofOfPayment>,
-        _verify_store: bool,
-        _retry_strategy: RetryStrategy,
+        verify_store: bool,
+        retry_strategy: RetryStrategy,
     ) -> Result<()> {
         let xorname = upload_item.xorname();
 
@@ -1059,7 +1065,7 @@ impl InnerUploader {
             let payee = payment_proof.to_peer_id_payee().ok_or_else(|| {
                 error!("Invalid payment proof found, could not obtain peer_id {payment_proof:?}");
                 UploadError::InternalError
-            });
+            })?;
 
         debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment_proof:?}");
 
@@ -1075,19 +1081,74 @@ impl InnerUploader {
             }
         };
 
-                trace!("Client upload started for chunk: {xorname:?}");
-                // TODO: pass in the verify_store, retry_startegy (or putcfg is even better). Also the fn has a panic. remove it.
+                let verification = if verify_store {
+                    let verification_cfg = GetRecordCfg {
+                        get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")),
+                        retry_strategy: Some(retry_strategy),
+                        target_record: None,
+                        expected_holders: Default::default(),
+                        is_register: false,
+                    };
+
+                    let random_nonce = thread_rng().gen::<Nonce>();
+                    let expected_proof =
+                        ChunkProof::from_chunk(&chunk, random_nonce).map_err(|err| {
+                            error!("Failed to create chunk proof: {err:?}");
+                            UploadError::Serialization(format!(
+                                "Failed to create chunk proof for {xorname:?}"
+                            ))
+                        })?;
+
+                    Some((
+                        VerificationKind::ChunkProof {
+                            expected_proof,
+                            nonce: random_nonce,
+                        },
+                        verification_cfg,
+                    ))
+                } else {
+                    None
+                };
+
+                let put_cfg = PutRecordCfg {
+                    put_quorum: Quorum::One,
+                    retry_strategy: Some(retry_strategy),
+                    use_put_record_to: Some(vec![payee]),
+                    verification,
+                };
+
+                debug!("Client upload started for chunk: {xorname:?}");
                 client
-                    .chunk_upload_with_payment(chunk, payment_proof)
+                    .chunk_upload_with_payment(chunk, payment_proof, Some(put_cfg))
                     .await?;
-                trace!("Client upload completed for chunk: {xorname:?}");
+                debug!("Client upload completed for chunk: {xorname:?}");
             }
-            UploadItem::Register { address: _, reg: _ } => {
-                // TODO: create a new fn to perform register_upload_with_payment
-                // reg.publish_register(Some((payment, payee)), verify_store)
-                //     .await?;
-                trace!("Client upload completed for register: {xorname:?}");
+            UploadItem::Register { address: _, reg } => {
+                debug!("Client upload started for register: {xorname:?}");
+                let verification = if verify_store {
+                    let get_cfg = GetRecordCfg {
+                        get_quorum: Quorum::Majority,
+                        retry_strategy: Some(retry_strategy),
+                        target_record: None,
+                        expected_holders: Default::default(),
+                        is_register: true,
+                    };
+                    Some((VerificationKind::Network, get_cfg))
+                } else {
+                    None
+                };
+
+                let put_cfg = PutRecordCfg {
+                    put_quorum: Quorum::All,
+                    retry_strategy: Some(retry_strategy),
+                    use_put_record_to: Some(vec![payee]),
+                    verification,
+                };
+                client
+                    .register_upload(&reg, Some(&payment_proof), &put_cfg)
+                    .await?;
+                debug!("Client upload completed for register: {xorname:?}");
             }
         }
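The verification round trip that this wires up is easiest to see in isolation. A minimal sketch of the idea (illustration only; it assumes `Nonce` is the nonce alias used by `ChunkProof` in the hunk below):

```rust
use sn_protocol::messages::{ChunkProof, Nonce};
use sn_protocol::storage::Chunk;

// Sketch: the uploader derives an expected proof from the chunk it just paid
// for and uploaded; an honestly-storing node derives the same proof from the
// record it holds, so the two must verify as equal.
fn node_stored_chunk_correctly(local: &Chunk, remote: &Chunk, nonce: Nonce) -> bool {
    let expected = ChunkProof::from_chunk(local, nonce).expect("serializable chunk");
    let reported = ChunkProof::from_chunk(remote, nonce).expect("serializable chunk");
    expected.verify(&reported)
}
```

The random nonce matters here: it prevents a node from replaying a previously observed proof instead of hashing the record it actually stores.

diff --git a/sn_protocol/src/messages/chunk_proof.rs b/sn_protocol/src/messages/chunk_proof.rs
index 145aae00de..4fa3900d1f 100644
--- a/sn_protocol/src/messages/chunk_proof.rs
+++ b/sn_protocol/src/messages/chunk_proof.rs
@@ -6,6 +6,8 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.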
+use crate::storage::{try_serialize_record, Chunk, RecordKind};
+use crate::Error;
 use serde::{Deserialize, Serialize};
 use std::fmt;
 
@@ -24,6 +26,13 @@ impl ChunkProof {
         ChunkProof(hash)
     }
 
+    pub fn from_chunk(chunk: &Chunk, nonce: Nonce) -> Result<Self, Error> {
+        let stored_on_node = try_serialize_record(chunk, RecordKind::Chunk)?.to_vec();
+        let proof = ChunkProof::new(&stored_on_node, nonce);
+
+        Ok(proof)
+    }
+
     pub fn verify(&self, other_proof: &ChunkProof) -> bool {
         self.0 == other_proof.0
     }

From c3208163b48c00e01b7e2881961e183598c3d2be Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 20:06:10 +0530
Subject: [PATCH 082/128] feat(autonomi): use the uploader for register and chunk uploads

---
 Cargo.lock                             |  1 -
 autonomi-cli/src/utils.rs              | 41 ++++++++----
 autonomi/Cargo.toml                    |  1 -
 autonomi/src/client/data.rs            | 78 +++++-----------------
 autonomi/src/client/data_private.rs    | 70 +++++---------------
 autonomi/src/client/external_signer.rs |  6 +-
 autonomi/src/client/fs.rs              | 12 ++--
 autonomi/src/client/fs_private.rs      | 10 +--
 autonomi/src/client/mod.rs             | 12 ++--
 autonomi/src/client/registers.rs       | 91 +++++++++-----------------
 autonomi/src/client/utils.rs           | 10 +--
 autonomi/src/uploader/mod.rs           | 33 ++++++++--
 12 files changed, 145 insertions(+), 220 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 834e26f3cf..82d5b99b06 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1088,7 +1088,6 @@ dependencies = [
  "console_error_panic_hook",
  "const-hex",
  "custom_debug",
- "dashmap",
  "evmlib",
  "eyre",
  "futures",
diff --git a/autonomi-cli/src/utils.rs b/autonomi-cli/src/utils.rs
index 5f031a3c24..80c46150ad 100644
--- a/autonomi-cli/src/utils.rs
+++ b/autonomi-cli/src/utils.rs
@@ -6,28 +6,40 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-use autonomi::client::{Amount, ClientEvent, UploadSummary};
+use autonomi::client::{Amount, ClientEvent};
+
+/// Summary of the upload operation.
+#[derive(Debug, Clone)]
+pub struct CliUploadSummary {
+    /// Total tokens spent during the upload.
+    pub tokens_spent: Amount,
+    /// Total number of records uploaded.
+    pub record_count: usize,
+}
 
 /// Collects upload summary from the event receiver.
 /// Send a signal to the returned sender to stop collecting and to return the result via the join handle.
 pub fn collect_upload_summary(
     mut event_receiver: tokio::sync::mpsc::Receiver<ClientEvent>,
 ) -> (
-    tokio::task::JoinHandle<UploadSummary>,
+    tokio::task::JoinHandle<CliUploadSummary>,
     tokio::sync::oneshot::Sender<()>,
 ) {
     let (upload_completed_tx, mut upload_completed_rx) = tokio::sync::oneshot::channel::<()>();
     let stats_thread = tokio::spawn(async move {
-        let mut tokens_spent: Amount = Amount::from(0);
-        let mut record_count = 0;
+        let mut tokens: Amount = Amount::from(0);
+        let mut records = 0;
 
         loop {
             tokio::select! 
{ event = event_receiver.recv() => { match event { - Some(ClientEvent::UploadComplete(upload_summary)) => { - tokens_spent += upload_summary.tokens_spent; - record_count += upload_summary.record_count; + Some(ClientEvent::UploadComplete { + tokens_spent, + record_count + }) => { + tokens += tokens_spent; + records += record_count; } None => break, } @@ -39,16 +51,19 @@ pub fn collect_upload_summary( // try to drain the event receiver in case there are any more events while let Ok(event) = event_receiver.try_recv() { match event { - ClientEvent::UploadComplete(upload_summary) => { - tokens_spent += upload_summary.tokens_spent; - record_count += upload_summary.record_count; + ClientEvent::UploadComplete { + tokens_spent, + record_count, + } => { + tokens += tokens_spent; + records += record_count; } } } - UploadSummary { - tokens_spent, - record_count, + CliUploadSummary { + tokens_spent: tokens, + record_count: records, } }); diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 324c3c6979..2213775d78 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -31,7 +31,6 @@ curv = { version = "0.10.1", package = "sn_curv", default-features = false, feat "num-bigint", ] } custom_debug = "~0.6.1" -dashmap = "~6.1.0" eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } const-hex = "1.12.0" hex = "~0.4.3" diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index c78a35de4e..542dea1f0b 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -6,15 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use crate::client::ClientEvent; +use crate::uploader::{UploadError, Uploader}; +use crate::{self_encryption::encrypt, Client}; use bytes::Bytes; use libp2p::kad::Quorum; -use tokio::task::{JoinError, JoinSet}; - -use std::collections::HashSet; -use xor_name::XorName; - -use crate::client::{ClientEvent, UploadSummary}; -use crate::{self_encryption::encrypt, Client}; use sn_evm::{Amount, AttoTokens}; use sn_evm::{EvmWallet, EvmWalletError}; use sn_networking::{GetRecordCfg, NetworkError}; @@ -22,6 +18,8 @@ use sn_protocol::{ storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}, NetworkAddress, }; +use std::collections::HashSet; +use xor_name::XorName; /// Raw Data Address (points to a DataMap) pub type DataAddr = XorName; @@ -41,14 +39,14 @@ pub enum PutError { PayError(#[from] PayError), #[error("Serialization error: {0}")] Serialization(String), + #[error("Upload Error")] + Upload(#[from] UploadError), #[error("A wallet error occurred.")] Wallet(#[from] sn_evm::EvmError), #[error("The vault owner key does not match the client's public key")] VaultBadOwner, #[error("Payment unexpectedly invalid for {0:?}")] PaymentUnexpectedlyInvalid(NetworkAddress), - #[error("Could not simultaneously upload chunks: {0:?}")] - JoinError(tokio::task::JoinError), } /// Errors that can occur during the pay operation. @@ -80,8 +78,6 @@ pub enum GetError { /// Errors that can occur during the cost calculation. 
#[derive(Debug, thiserror::Error)] pub enum CostError { - #[error("Could not simultaneously fetch store costs: {0:?}")] - JoinError(JoinError), #[error("Failed to self-encrypt data.")] SelfEncryption(#[from] crate::self_encryption::Error), #[error("Could not get store quote for: {0:?} after several retries")] @@ -118,62 +114,24 @@ impl Client { debug!("Encryption took: {:.2?}", now.elapsed()); let map_xor_name = *data_map_chunk.address().xorname(); - let mut xor_names = vec![map_xor_name]; - for chunk in &chunks { - xor_names.push(*chunk.name()); - } + let mut uploader = Uploader::new(self.clone(), wallet.clone()); + uploader.insert_chunks(chunks); + uploader.insert_chunks(vec![data_map_chunk]); - // Pay for all chunks + data map chunk - info!("Paying for {} addresses", xor_names.len()); - let (payment_proofs, _free_chunks) = self - .pay(xor_names.into_iter(), wallet) - .await - .inspect_err(|err| error!("Error paying for data: {err:?}"))?; - - let mut record_count = 0; - - // Upload all the chunks in parallel including the data map chunk - debug!("Uploading {} chunks", chunks.len()); - let mut tasks = JoinSet::new(); - for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) { - let self_clone = self.clone(); - let address = *chunk.address(); - if let Some(proof) = payment_proofs.get(chunk.name()) { - let proof_clone = proof.clone(); - tasks.spawn(async move { - self_clone - .chunk_upload_with_payment(chunk, proof_clone, None) - .await - .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) - }); - } else { - debug!("Chunk at {address:?} was already paid for so skipping"); - } - } - while let Some(result) = tasks.join_next().await { - result - .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) - .map_err(PutError::JoinError)? - .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; - record_count += 1; - } + let summary = uploader.start_upload().await?; if let Some(channel) = self.client_event_sender.as_ref() { - let tokens_spent = payment_proofs - .values() - .map(|proof| proof.quote.cost.as_atto()) - .sum::(); - - let summary = UploadSummary { - record_count, - tokens_spent, - }; - if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + if let Err(err) = channel + .send(ClientEvent::UploadComplete { + record_count: summary.uploaded_count, + tokens_spent: summary.storage_cost, + }) + .await + { error!("Failed to send client event: {err:?}"); } } - Ok(map_xor_name) } diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs index dc4d109d4b..b1cda1b3f7 100644 --- a/autonomi/src/client/data_private.rs +++ b/autonomi/src/client/data_private.rs @@ -6,17 +6,15 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use std::hash::{DefaultHasher, Hash, Hasher}; - +use super::data::{GetError, PutError}; +use crate::client::ClientEvent; +use crate::uploader::Uploader; +use crate::{self_encryption::encrypt, Client}; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use sn_evm::{Amount, EvmWallet}; +use sn_evm::EvmWallet; use sn_protocol::storage::Chunk; -use tokio::task::JoinSet; - -use super::data::{GetError, PutError}; -use crate::client::{ClientEvent, UploadSummary}; -use crate::{self_encryption::encrypt, Client}; +use std::hash::{DefaultHasher, Hash, Hasher}; /// Private data on the network can be accessed with this #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)] @@ -69,53 +67,21 @@ impl Client { let (data_map_chunk, chunks) = encrypt(data)?; debug!("Encryption took: {:.2?}", now.elapsed()); - // Pay for all chunks - let xor_names: Vec<_> = chunks.iter().map(|chunk| *chunk.name()).collect(); - info!("Paying for {} addresses", xor_names.len()); - let (payment_proofs, _free_chunks) = self - .pay(xor_names.into_iter(), wallet) - .await - .inspect_err(|err| error!("Error paying for data: {err:?}"))?; - // Upload the chunks with the payments - let mut record_count = 0; - debug!("Uploading {} chunks", chunks.len()); - let mut tasks = JoinSet::new(); - for chunk in chunks { - let self_clone = self.clone(); - let address = *chunk.address(); - if let Some(proof) = payment_proofs.get(chunk.name()) { - let proof_clone = proof.clone(); - tasks.spawn(async move { - self_clone - .chunk_upload_with_payment(chunk, proof_clone, None) - .await - .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) - }); - } else { - debug!("Chunk at {address:?} was already paid for so skipping"); - } - } - while let Some(result) = tasks.join_next().await { - result - .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) - .map_err(PutError::JoinError)? 
- .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; - record_count += 1; - } + let mut uploader = Uploader::new(self.clone(), wallet.clone()); + uploader.insert_chunks(chunks); + uploader.insert_chunks(vec![data_map_chunk.clone()]); - // Reporting - if let Some(channel) = self.client_event_sender.as_ref() { - let tokens_spent = payment_proofs - .values() - .map(|proof| proof.quote.cost.as_atto()) - .sum::(); + let summary = uploader.start_upload().await?; - let summary = UploadSummary { - record_count, - tokens_spent, - }; - if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + if let Some(channel) = self.client_event_sender.as_ref() { + if let Err(err) = channel + .send(ClientEvent::UploadComplete { + record_count: summary.uploaded_count, + tokens_spent: summary.storage_cost, + }) + .await + { error!("Failed to send client event: {err:?}"); } } diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs index 1d5b87c2e7..5057bc3b28 100644 --- a/autonomi/src/client/external_signer.rs +++ b/autonomi/src/client/external_signer.rs @@ -59,10 +59,10 @@ impl Client { debug!("Uploading data map chunk: {map_xor_name:?}"); self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone(), None) .await - .inspect_err(|err| error!("Error uploading data map chunk: {err:?}")) - } else { - Ok(()) + .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; } + + Ok(()) } async fn upload_chunks( diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index d7f243df68..43ab87f504 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -20,7 +20,7 @@ use super::data::{DataAddr, GetError, PutError}; /// Errors that can occur during the file upload operation. #[cfg(feature = "fs")] #[derive(Debug, thiserror::Error)] -pub enum UploadError { +pub enum FileUploadError { #[error("Failed to recursively traverse directory")] WalkDir(#[from] walkdir::Error), #[error("Input/output failure")] @@ -38,7 +38,7 @@ pub enum UploadError { #[cfg(feature = "fs")] /// Errors that can occur during the download operation. 
#[derive(Debug, thiserror::Error)] -pub enum DownloadError { +pub enum FileDownloadError { #[error("Failed to download file")] GetError(#[from] GetError), #[error("IO failure")] @@ -67,7 +67,7 @@ impl Client { &self, data_addr: DataAddr, to_dest: PathBuf, - ) -> Result<(), DownloadError> { + ) -> Result<(), FileDownloadError> { let data = self.data_get(data_addr).await?; if let Some(parent) = to_dest.parent() { tokio::fs::create_dir_all(parent).await?; @@ -81,7 +81,7 @@ impl Client { &self, archive_addr: ArchiveAddr, to_dest: PathBuf, - ) -> Result<(), DownloadError> { + ) -> Result<(), FileDownloadError> { let archive = self.archive_get(archive_addr).await?; for (path, addr, _meta) in archive.iter() { self.file_download(*addr, to_dest.join(path)).await?; @@ -95,7 +95,7 @@ impl Client { &self, dir_path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let mut archive = Archive::new(); for entry in walkdir::WalkDir::new(dir_path) { @@ -129,7 +129,7 @@ impl Client { &self, path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let data = tokio::fs::read(path).await?; let data = Bytes::from(data); let addr = self.data_put(data, wallet).await?; diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs index 0d9b819d70..31f7857ec6 100644 --- a/autonomi/src/client/fs_private.rs +++ b/autonomi/src/client/fs_private.rs @@ -21,7 +21,7 @@ use std::path::PathBuf; use super::archive_private::{PrivateArchive, PrivateArchiveAccess}; use super::data_private::PrivateDataAccess; -use super::fs::{DownloadError, UploadError}; +use super::fs::{FileDownloadError, FileUploadError}; impl Client { /// Download a private file from network to local file system @@ -29,7 +29,7 @@ impl Client { &self, data_access: PrivateDataAccess, to_dest: PathBuf, - ) -> Result<(), DownloadError> { + ) -> Result<(), FileDownloadError> { let data = self.private_data_get(data_access).await?; if let Some(parent) = to_dest.parent() { tokio::fs::create_dir_all(parent).await?; @@ -43,7 +43,7 @@ impl Client { &self, archive_access: PrivateArchiveAccess, to_dest: PathBuf, - ) -> Result<(), DownloadError> { + ) -> Result<(), FileDownloadError> { let archive = self.private_archive_get(archive_access).await?; for (path, addr, _meta) in archive.iter() { self.private_file_download(addr.clone(), to_dest.join(path)) @@ -58,7 +58,7 @@ impl Client { &self, dir_path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let mut archive = PrivateArchive::new(); for entry in walkdir::WalkDir::new(dir_path) { @@ -92,7 +92,7 @@ impl Client { &self, path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let data = tokio::fs::read(path).await?; let data = Bytes::from(data); let addr = self.private_data_put(data, wallet).await?; diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 0585efa037..172fb9ba4f 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -207,12 +207,8 @@ async fn handle_event_receiver( /// Events that can be broadcasted by the client. #[derive(Debug, Clone)] pub enum ClientEvent { - UploadComplete(UploadSummary), -} - -/// Summary of an upload operation. 
-#[derive(Debug, Clone)] -pub struct UploadSummary { - pub record_count: usize, - pub tokens_spent: Amount, + UploadComplete { + record_count: usize, + tokens_spent: Amount, + }, } diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index ea47d363fb..76e3044730 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -6,35 +6,30 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use super::data::CostError; +use crate::client::{Client, ClientEvent}; +use crate::uploader::{UploadError, Uploader}; /// Register Secret Key pub use bls::SecretKey as RegisterSecretKey; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; use sn_evm::Amount; use sn_evm::AttoTokens; -use sn_evm::EvmWalletError; +use sn_evm::EvmWallet; use sn_evm::ProofOfPayment; use sn_networking::VerificationKind; -use sn_protocol::storage::RetryStrategy; -pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; - -use crate::client::data::PayError; -use crate::client::Client; -use crate::client::ClientEvent; -use crate::client::UploadSummary; -use bytes::Bytes; -use libp2p::kad::{Quorum, Record}; -use sn_evm::EvmWallet; use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg}; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; +use sn_protocol::storage::RetryStrategy; use sn_protocol::NetworkAddress; use sn_registers::Register as BaseRegister; +pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister}; use std::collections::BTreeSet; use xor_name::XorName; -use super::data::CostError; - #[derive(Debug, thiserror::Error)] pub enum RegisterError { #[error("Cost error: {0}")] @@ -45,16 +40,12 @@ pub enum RegisterError { Serialization, #[error("Register could not be verified (corrupt)")] FailedVerification, - #[error("Payment failure occurred during register creation.")] - Pay(#[from] PayError), - #[error("Failed to retrieve wallet payment")] - Wallet(#[from] EvmWalletError), + #[error("Upload Error")] + Upload(#[from] UploadError), #[error("Failed to write to low-level register")] Write(#[source] sn_registers::Error), #[error("Failed to sign register")] CouldNotSign(#[source] sn_registers::Error), - #[error("Received invalid quote from node, this node is possibly malfunctioning, try another node by trying another register name")] - InvalidQuote, } #[derive(Clone, Debug)] @@ -317,57 +308,39 @@ impl Client { // Owner can write to the register. 
let register = Register::new(Some(value), name, owner, permissions)?;
-        let address = register.address();
+        let address = *register.address();
 
-        let reg_xor = address.xorname();
-        debug!("Paying for register at address: {address}");
-        let (payment_proofs, _skipped) = self
-            .pay(std::iter::once(reg_xor), wallet)
-            .await
-            .inspect_err(|err| {
-                error!("Failed to pay for register at address: {address} : {err}")
-            })?;
-        let proof = payment_proofs.get(&reg_xor).ok_or_else(|| {
-            // register was skipped, meaning it was already paid for
-            error!("Register at address: {address} was already paid for");
-            RegisterError::Network(NetworkError::RegisterAlreadyExists)
-        })?;
+        let mut uploader = Uploader::new(self.clone(), wallet.clone());
+        uploader.insert_register(vec![register]);
+        uploader.set_collect_registers(true);
 
-        let payee = proof
-            .to_peer_id_payee()
-            .ok_or(RegisterError::InvalidQuote)
-            .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?;
-
-        let get_cfg = GetRecordCfg {
-            get_quorum: Quorum::Majority,
-            retry_strategy: Some(RetryStrategy::default()),
-            target_record: None,
-            expected_holders: Default::default(),
-            is_register: true,
-        };
-        let put_cfg = PutRecordCfg {
-            put_quorum: Quorum::All,
-            retry_strategy: None,
-            use_put_record_to: Some(vec![payee]),
-            verification: Some((VerificationKind::Network, get_cfg)),
-        };
+        let summary = uploader.start_upload().await?;
 
-        self.register_upload(&register, Some(proof), &put_cfg)
-            .await?;
+        let register = summary
+            .uploaded_registers
+            .get(&address)
+            .ok_or_else(|| {
+                error!("Failed to get register with name: {name}");
+                RegisterError::Upload(UploadError::InternalError)
+            })?
+            .clone();
 
         if let Some(channel) = self.client_event_sender.as_ref() {
-            let summary = UploadSummary {
-                record_count: 1,
-                tokens_spent: proof.quote.cost.as_atto(),
-            };
-            if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await {
-                error!("Failed to send client event: {err}");
+            if let Err(err) = channel
+                .send(ClientEvent::UploadComplete {
+                    record_count: summary.uploaded_count,
+                    tokens_spent: summary.storage_cost,
+                })
+                .await
+            {
+                error!("Failed to send client event: {err:?}");
             }
         }
 
         Ok(register)
     }
 
+    // Used by the uploader.
     pub(crate) async fn register_upload(
         &self,
         register: &Register,
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index f88bb1d1e1..4515adeca9 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -7,11 +7,11 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 use super::{
-    data::{CostError, GetError, PayError, PutError},
+    data::{CostError, GetError, PayError},
     Client,
 };
-use crate::self_encryption::DataMapLevel;
 use crate::utils::payment_proof_from_quotes_and_payments;
+use crate::{self_encryption::DataMapLevel, uploader::UploadError};
 use bytes::Bytes;
 use libp2p::kad::{Quorum, Record};
 use rand::{thread_rng, Rng};
@@ -88,7 +88,7 @@ impl Client {
         chunk: Chunk,
         payment: ProofOfPayment,
         cfg: Option<PutRecordCfg>,
-    ) -> Result<(), PutError> {
+    ) -> Result<(), UploadError> {
         let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID");
 
         debug!("Storing chunk: {chunk:?} to {:?}", storing_node);
@@ -100,7 +100,7 @@ impl Client {
             key: key.clone(),
             value: try_serialize_record(&(payment, chunk.clone()), record_kind)
                 .map_err(|e| {
-                    PutError::Serialization(format!(
+                    UploadError::Serialization(format!(
                         "Failed to serialize chunk with payment: {e:?}"
                     ))
                 })?
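The register path above generalizes: callers can push a whole batch of registers through one pipeline and read the merged replicas back from the summary. A minimal sketch (illustration only; `registers` here is a hypothetical `Vec<Register>`, and a connected client plus funded wallet are assumed):

```rust
// Sketch: upload a batch of registers and collect the merged copies.
let mut uploader = Uploader::new(client.clone(), wallet.clone());
uploader.insert_register(registers);
uploader.set_collect_registers(true); // keep merged replicas in the summary

let summary = uploader.start_upload().await?;
for (address, register) in &summary.uploaded_registers {
    println!("register {address:?} uploaded/merged: {register:?}");
}
```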
@@ -124,7 +124,7 @@ impl Client {
                 let random_nonce = thread_rng().gen::<Nonce>();
                 let expected_proof =
                     ChunkProof::from_chunk(&chunk, random_nonce).map_err(|err| {
-                        PutError::Serialization(format!("Failed to obtain chunk proof: {err:?}"))
+                        UploadError::Serialization(format!("Failed to obtain chunk proof: {err:?}"))
                     })?;
 
                 Some((
diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index df390671d7..e34f281ac3 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -10,7 +10,6 @@
 mod tests;
 mod upload;
 
-use crate::client::data::PutError;
 use crate::client::registers::{Register, RegisterError};
 use crate::Client;
 use itertools::Either;
@@ -48,6 +47,8 @@ pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1;
 
 #[derive(Debug, thiserror::Error)]
 pub enum UploadError {
+    #[error("Network Token error: {0:?}")]
+    EvmNetworkTokenError(#[from] EvmNetworkTokenError),
     #[error("Internal Error")]
     InternalError,
     #[error("Invalid cfg: {0:?}")]
@@ -61,18 +62,34 @@ pub enum UploadError {
     },
     #[error("Network error: {0:?}")]
     Network(#[from] NetworkError),
-    #[error("Put error: {0:?}")]
-    PutError(#[from] PutError),
-    #[error("Register error: {0:?}")]
-    RegisterError(#[from] RegisterError),
+    #[error("Register could not be verified (corrupt)")]
+    RegisterFailedVerification,
+    #[error("Failed to write to low-level register")]
+    RegisterWrite(#[source] sn_registers::Error),
+    #[error("Failed to sign register")]
+    RegisterCouldNotSign(#[source] sn_registers::Error),
     #[error("Multiple consecutive network errors reported during upload")]
     SequentialNetworkErrors,
     #[error("Too many sequential payment errors reported during upload")]
     SequentialUploadPaymentError,
     #[error("Failed to serialize {0}")]
     Serialization(String),
-    #[error("Network Token error: {0:?}")]
-    EvmNetworkTokenError(#[from] EvmNetworkTokenError),
+}
+
+// UploadError is used inside RegisterError (as RegisterError::Upload), but the uploader also needs
+// to surface register failures as UploadError. This manual conversion avoids a recursive pair of
+// #[from] derives between the two enums.
+impl From<RegisterError> for UploadError {
+    fn from(err: RegisterError) -> Self {
+        match err {
+            RegisterError::Network(err) => Self::Network(err),
+            RegisterError::Write(err) => Self::RegisterWrite(err),
+            RegisterError::CouldNotSign(err) => Self::RegisterCouldNotSign(err),
+            RegisterError::Cost(_) => Self::InternalError,
+            RegisterError::Serialization => Self::Serialization("Register".to_string()),
+            RegisterError::FailedVerification => Self::RegisterFailedVerification,
+            RegisterError::Upload(err) => err,
+        }
+    }
 }
 
 /// The set of options to pass into the `Uploader`
@@ -108,7 +125,9 @@ pub struct UploadSummary {
     pub storage_cost: Amount,
     pub final_balance: Amount,
     pub uploaded_addresses: HashSet<NetworkAddress>,
     pub uploaded_registers: HashMap<RegisterAddress, Register>,
+    /// The number of records that were paid for and uploaded to the network.
     pub uploaded_count: usize,
+    /// The number of records that were skipped because they were already present in the network.
pub skipped_count: usize,
 }
 
From f4cd35c522cde33c0463f5e80db763fbf6979b2d Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 20:21:45 +0530
Subject: [PATCH 083/128] feat: provide target arch based mpsc channel

---
 Cargo.lock                       | 13 +++++++++++++
 autonomi/src/uploader/mod.rs     |  4 ++--
 autonomi/src/uploader/upload.rs  | 27 +++++++++++++--------------
 sn_networking/Cargo.toml         |  1 +
 sn_networking/src/target_arch.rs | 25 +++++++++++++++++++++++--
 5 files changed, 52 insertions(+), 18 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 82d5b99b06..a9e73bd939 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -950,6 +950,18 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
+[[package]]
+name = "async-channel"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
+dependencies = [
+ "concurrent-queue",
+ "event-listener-strategy",
+ "futures-core",
+ "pin-project-lite",
+]
+
 [[package]]
 name = "async-io"
 version = "2.3.4"
diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index e34f281ac3..5280fd6815 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -15,6 +15,7 @@ use crate::Client;
 use itertools::Either;
 use sn_evm::EvmWallet;
 use sn_evm::{Amount, EvmNetworkTokenError, ProofOfPayment};
+use sn_networking::target_arch::{mpsc, mpsc_channel};
 use sn_networking::{NetworkError, PayeeQuote};
 use sn_protocol::{
     storage::{Chunk, ChunkAddress, RetryStrategy},
@@ -26,7 +27,6 @@ use std::{
     fmt::Debug,
     path::PathBuf,
 };
-use tokio::sync::mpsc;
 use upload::InnerUploader;
 use xor_name::XorName;
 
@@ -430,7 +430,7 @@ impl InnerUploader {
     }
 
     pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver<UploadEvent> {
-        let (tx, rx) = mpsc::channel(100);
+        let (tx, rx) = mpsc_channel(100);
         self.event_sender = Some(tx);
         rx
     }
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index 1cf0757c0e..c6697df9b1 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -22,6 +22,7 @@ use itertools::Either;
 use libp2p::{kad::Quorum, PeerId};
 use rand::{thread_rng, Rng};
 use sn_evm::{Amount, EvmWallet, ProofOfPayment};
+use sn_networking::target_arch::{mpsc, mpsc_channel, mpsc_recv, spawn};
 use sn_networking::{GetRecordCfg, PayeeQuote, PutRecordCfg, VerificationKind};
 use sn_protocol::{
     messages::ChunkProof,
@@ -33,7 +34,6 @@ use std::{
     collections::{HashMap, HashSet},
     num::NonZero,
 };
-use tokio::sync::mpsc;
 use xor_name::XorName;
 
@@ -80,9 +80,9 @@ pub(super) async fn start_upload(
         channels
     } else {
         // 6 because of the 6 pipelines, 1 for redundancy.
-        mpsc::channel(uploader.cfg.batch_size * 6 + 1)
+        mpsc_channel(uploader.cfg.batch_size * 6 + 1)
     };
-    let (make_payment_sender, make_payment_receiver) = mpsc::channel(uploader.cfg.batch_size);
+    let (make_payment_sender, make_payment_receiver) = mpsc_channel(uploader.cfg.batch_size);
 
     uploader.start_payment_processing_thread(
         make_payment_receiver,
@@ -273,8 +273,7 @@ pub(super) async fn start_upload(
 
         trace!("UPLOADER STATE: before await task result: {uploader:?}");
         trace!("Fetching task result");
-        let task_result = task_result_receiver
-            .recv()
+        let task_result = mpsc_recv(&mut task_result_receiver)
             .await
             .ok_or(UploadError::InternalError)?;
         trace!("Received task result: {task_result:?}");
@@ -556,7 +555,7 @@ impl UploaderInterface for Uploader {
             Ok(vec![])
         };
 
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             let task_result = match InnerUploader::get_store_cost(
                 client,
                 xorname,
@@ -600,7 +599,7 @@ impl UploaderInterface for Uploader {
     ) {
         let xorname = reg_addr.xorname();
         trace!("Spawning get_register for {xorname:?}");
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             let task_result = match InnerUploader::get_register(client, reg_addr).await {
                 Ok(register) => {
                     debug!("Register retrieved for {xorname:?}");
@@ -627,7 +626,7 @@ impl UploaderInterface for Uploader {
     ) {
         let xorname = upload_item.xorname();
         trace!("Spawning push_register for {xorname:?}");
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             let task_result = match InnerUploader::push_register(client, upload_item, verify_store)
                 .await
             {
@@ -652,7 +651,7 @@ impl UploaderInterface for Uploader {
         to_send: Option<(UploadItem, Box<PayeeQuote>)>,
         make_payment_sender: mpsc::Sender<Option<(UploadItem, Box<PayeeQuote>)>>,
     ) {
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             let _ = make_payment_sender.send(to_send).await;
         });
     }
@@ -670,7 +669,7 @@ impl UploaderInterface for Uploader {
 
         let last_payment = previous_payments.and_then(|payments| payments.last().cloned());
 
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             let xorname = upload_item.xorname();
             let result = InnerUploader::upload_item(
                 client,
@@ -874,14 +873,14 @@ impl InnerUploader {
     ) -> Result<()> {
         let wallet = self.wallet.clone();
 
-        let _handle = tokio::spawn(async move {
+        let _handle = spawn(async move {
             debug!("Spawning the long running make payment processing loop.");
 
             let mut to_be_paid_list = Vec::new();
             let mut cost_map = HashMap::new();
 
             let mut got_a_previous_force_payment = false;
-            while let Some(payment) = make_payment_receiver.recv().await {
+            while let Some(payment) = mpsc_recv(&mut make_payment_receiver).await {
                 let make_payments = if let Some((item, quote)) = payment {
                     to_be_paid_list.push((
                         quote.2.hash(),
@@ -947,7 +946,7 @@ impl InnerUploader {
                 }
             };
             let pay_for_chunk_sender_clone = task_result_sender.clone();
-            let _handle = tokio::spawn(async move {
+            let _handle = spawn(async move {
                 let _ = pay_for_chunk_sender_clone.send(result).await;
             });
 
@@ -1160,7 +1159,7 @@ impl InnerUploader {
     fn emit_upload_event(&mut self, event: UploadEvent) {
         if let Some(sender) = self.event_sender.as_ref() {
             let sender_clone = sender.clone();
-            let _handle = tokio::spawn(async move {
+            let _handle = spawn(async move {
                 if let Err(err) = sender_clone.send(event).await {
                     error!("Error emitting upload event: {err:?}");
                 }
diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml
index 4f2270ff37..9c76065bf0 100644
--- a/sn_networking/Cargo.toml
+++ b/sn_networking/Cargo.toml
@@ -94,6 
+94,7 @@ workspace = true
 crate-type = ["cdylib", "rlib"]
 
 [target.'cfg(target_arch = "wasm32")'.dependencies]
+async-channel = "2.3.1"
 getrandom = { version = "0.2.12", features = ["js"] }
 libp2p = { version = "0.54.1", features = [
     "tokio",
diff --git a/sn_networking/src/target_arch.rs b/sn_networking/src/target_arch.rs
index 35a1b62092..b53ce472c5 100644
--- a/sn_networking/src/target_arch.rs
+++ b/sn_networking/src/target_arch.rs
@@ -19,12 +19,33 @@ pub use tokio::{
 
 #[cfg(target_arch = "wasm32")]
 pub use std::time::Duration;
-
+#[cfg(target_arch = "wasm32")]
+pub use wasm_bindgen_futures::spawn_local as spawn;
 #[cfg(target_arch = "wasm32")]
 pub use wasmtimer::{
     std::{Instant, SystemTime, UNIX_EPOCH},
     tokio::{interval, sleep, timeout, Interval},
 };
 
+/// === Channels ====
+
+#[cfg(not(target_arch = "wasm32"))]
+pub use tokio::sync::mpsc;
+#[cfg(not(target_arch = "wasm32"))]
+pub use tokio::sync::mpsc::channel as mpsc_channel;
+
+#[cfg(not(target_arch = "wasm32"))]
+pub async fn mpsc_recv<T>(mpsc: &mut mpsc::Receiver<T>) -> Option<T> {
+    mpsc.recv().await
+}
+
+// futures crate has different function signatures than tokio, so instead we use async_channel here.
 #[cfg(target_arch = "wasm32")]
-pub use wasm_bindgen_futures::spawn_local as spawn;
+pub use async_channel as mpsc;
+#[cfg(target_arch = "wasm32")]
+pub use async_channel::bounded as mpsc_channel;
+
+#[cfg(target_arch = "wasm32")]
+pub async fn mpsc_recv<T>(mpsc: &mut mpsc::Receiver<T>) -> Option<T> {
+    mpsc.recv().await.ok()
+}

From 78c526af736e88a22a98e76940e22a2707a07b80 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 20:34:55 +0530
Subject: [PATCH 084/128] feat(uploader): feature gate registers

---
 autonomi/src/uploader/mod.rs    |  23 +++++++
 autonomi/src/uploader/upload.rs | 116 ++++++++++++++++++++------------
 2 files changed, 97 insertions(+), 42 deletions(-)

diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index 5280fd6815..493dbefcbf 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -10,6 +10,7 @@
 mod tests;
 mod upload;
 
+#[cfg(feature = "registers")]
 use crate::client::registers::{Register, RegisterError};
 use crate::Client;
 use itertools::Either;
@@ -21,6 +22,7 @@ use sn_protocol::{
     storage::{Chunk, ChunkAddress, RetryStrategy},
     NetworkAddress,
 };
+#[cfg(feature = "registers")]
 use sn_registers::RegisterAddress;
 use std::{
     collections::{HashMap, HashSet},
@@ -62,10 +64,13 @@ pub enum UploadError {
     },
     #[error("Network error: {0:?}")]
     Network(#[from] NetworkError),
+    #[cfg(feature = "registers")]
     #[error("Register could not be verified (corrupt)")]
     RegisterFailedVerification,
+    #[cfg(feature = "registers")]
     #[error("Failed to write to low-level register")]
     RegisterWrite(#[source] sn_registers::Error),
+    #[cfg(feature = "registers")]
     #[error("Failed to sign register")]
     RegisterCouldNotSign(#[source] sn_registers::Error),
     #[error("Multiple consecutive network errors reported during upload")]
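Because `mpsc_channel`, `mpsc_recv` and `spawn` resolve per target, uploader call sites need no `cfg` of their own. A minimal sketch of the pattern (illustration only):

```rust
use sn_networking::target_arch::{mpsc_channel, mpsc_recv, spawn};

// Sketch: on native targets these resolve to tokio's mpsc and spawn; on
// wasm32 they resolve to async-channel and spawn_local, with no cfg at the
// call site.
async fn demo() {
    let (tx, mut rx) = mpsc_channel::<u32>(8);
    spawn(async move {
        let _ = tx.send(42).await;
    });
    while let Some(n) = mpsc_recv(&mut rx).await {
        println!("got {n}");
    }
}
```

@@ -78,6 +83,7 @@ pub enum UploadError {
 
 // UploadError is used inside RegisterError (as RegisterError::Upload), but the uploader also needs
 // to surface register failures as UploadError. This manual conversion avoids a recursive pair of
 // #[from] derives between the two enums.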
+#[cfg(feature = "registers")]
 impl From<RegisterError> for UploadError {
     fn from(err: RegisterError) -> Self {
         match err {
@@ -101,6 +107,7 @@ pub struct UploadCfg {
     pub show_holders: bool,
     pub retry_strategy: RetryStrategy,
     pub max_repayments_for_failed_data: usize,
+    #[cfg(feature = "registers")]
     pub collect_registers: bool,
 }
 
@@ -113,6 +120,7 @@ impl Default for UploadCfg {
             show_holders: false,
             retry_strategy: RetryStrategy::Balanced,
             max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM,
+            #[cfg(feature = "registers")]
             collect_registers: false,
         }
     }
@@ -124,6 +132,7 @@ pub struct UploadSummary {
     pub storage_cost: Amount,
     pub final_balance: Amount,
     pub uploaded_addresses: HashSet<NetworkAddress>,
+    #[cfg(feature = "registers")]
     pub uploaded_registers: HashMap<RegisterAddress, Register>,
     /// The number of records that were paid for and uploaded to the network.
     pub uploaded_count: usize,
@@ -135,6 +144,7 @@ impl UploadSummary {
     /// Merge two UploadSummary together.
     pub fn merge(mut self, other: Self) -> Result<Self, Box<UploadError>> {
         self.uploaded_addresses.extend(other.uploaded_addresses);
+        #[cfg(feature = "registers")]
         self.uploaded_registers.extend(other.uploaded_registers);
 
         let summary = Self {
@@ -153,6 +163,7 @@ impl UploadSummary {
                 UploadError::InternalError
             })?,
             uploaded_addresses: self.uploaded_addresses,
+            #[cfg(feature = "registers")]
             uploaded_registers: self.uploaded_registers,
             uploaded_count: self.uploaded_count + other.uploaded_count,
             skipped_count: self.skipped_count + other.skipped_count,
@@ -168,6 +179,7 @@ pub enum UploadEvent {
     ChunkUploaded(ChunkAddress),
     /// Uploaded a Register to the network.
     /// The returned register is just the passed in register.
+    #[cfg(feature = "registers")]
     RegisterUploaded(Register),
     ///
     /// The Chunk already exists in the network. No payments were made.
@@ -175,6 +187,7 @@ pub enum UploadEvent {
     /// The Register already exists in the network. The local register changes were pushed to the network.
     /// No payments were made.
     /// The returned register contains the remote replica merged with the passed in register.
+    #[cfg(feature = "registers")]
     RegisterUpdated(Register),
     /// Payment for a batch of records has been made.
     PaymentMade { tokens_spent: Amount },
@@ -303,6 +316,7 @@ impl Uploader {
     /// through the UploadSummary when the whole upload process completes.
     ///
     /// By default, this option is set to `false`
+    #[cfg(feature = "registers")]
     pub fn set_collect_registers(&mut self, collect_registers: bool) {
         self.inner
             .as_mut()
@@ -336,6 +350,7 @@ impl Uploader {
     }
 
     /// Insert a list of registers to upload.
+    #[cfg(feature = "registers")]
     pub fn insert_register(&mut self, registers: impl IntoIterator<Item = Register>) {
         self.inner
             .as_mut()
@@ -425,6 +440,7 @@ impl InnerUploader {
         self.cfg.max_repayments_for_failed_data = retries;
     }
 
+    #[cfg(feature = "registers")]
     pub(super) fn set_collect_registers(&mut self, collect_registers: bool) {
         self.cfg.collect_registers = collect_registers;
     }
@@ -461,6 +477,7 @@ impl InnerUploader {
         }));
     }
 
+    #[cfg(feature = "registers")]
     pub(super) fn insert_register(&mut self, registers: impl IntoIterator<Item = Register>) {
         self.all_upload_items
             .extend(registers.into_iter().map(|reg| {
@@ -478,6 +495,7 @@ enum UploadItem {
         // Either the actual chunk or the path to the chunk.
         chunk: Either<Chunk, PathBuf>,
     },
+    #[cfg(feature = "registers")]
     Register {
         address: RegisterAddress,
         reg: Register,
@@ -488,6 +506,7 @@ impl UploadItem {
     fn address(&self) -> NetworkAddress {
         match self {
             Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address),
+            #[cfg(feature = "registers")]
            Self::Register { address, ..
} => NetworkAddress::from_register_address(*address), } } @@ -495,6 +514,7 @@ impl UploadItem { fn xorname(&self) -> XorName { match self { UploadItem::Chunk { address, .. } => *address.xorname(), + #[cfg(feature = "registers")] UploadItem::Register { address, .. } => address.xorname(), } } @@ -502,10 +522,13 @@ impl UploadItem { #[derive(Debug)] enum TaskResult { + #[cfg(feature = "registers")] GetRegisterFromNetworkOk { remote_register: Register, }, + #[cfg(feature = "registers")] GetRegisterFromNetworkErr(XorName), + #[cfg(feature = "registers")] PushRegisterOk { updated_register: Register, }, diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs index c6697df9b1..c0e448a222 100644 --- a/autonomi/src/uploader/upload.rs +++ b/autonomi/src/uploader/upload.rs @@ -13,10 +13,9 @@ use super::{ GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary, Uploader, UploaderInterface, PAYMENT_BATCH_SIZE, }; -use crate::{ - client::registers::Register, uploader::UploadError, - utils::payment_proof_from_quotes_and_payments, Client, -}; +#[cfg(feature = "registers")] +use crate::client::registers::Register; +use crate::{uploader::UploadError, utils::payment_proof_from_quotes_and_payments, Client}; use bytes::Bytes; use itertools::Either; use libp2p::{kad::Quorum, PeerId}; @@ -29,6 +28,7 @@ use sn_protocol::{ storage::{Chunk, RetryStrategy}, NetworkAddress, }; +#[cfg(feature = "registers")] use sn_registers::RegisterAddress; use std::{ collections::{HashMap, HashSet}, @@ -104,17 +104,20 @@ pub(super) async fn start_upload( .collect(); // registers have to be verified + merged with remote replica, so we have to fetch it first. - uploader.pending_to_get_register = uploader - .all_upload_items - .iter() - .filter_map(|(_xorname, item)| { - if let UploadItem::Register { address, .. } = item { - Some(*address) - } else { - None - } - }) - .collect(); + #[cfg(feature = "registers")] + { + uploader.pending_to_get_register = uploader + .all_upload_items + .iter() + .filter_map(|(_xorname, item)| { + if let UploadItem::Register { address, .. } = item { + Some(*address) + } else { + None + } + }) + .collect(); + } loop { // Break if we have uploaded all the items. @@ -162,39 +165,45 @@ pub(super) async fn start_upload( // try to GET register if we have enough buffer. // The results of the get & push register steps are used to fill up `pending_to_get_store` cost // Since the get store cost list is the init state, we don't have to check if it is not full. - while !uploader.pending_to_get_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size + #[cfg(feature = "registers")] { - if let Some(reg_addr) = uploader.pending_to_get_register.pop() { - trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); - let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); - interface.submit_get_register_task( - uploader.client.clone(), - reg_addr, - task_result_sender.clone(), - ); + while !uploader.pending_to_get_register.is_empty() + && uploader.on_going_get_register.len() < uploader.cfg.batch_size + { + if let Some(reg_addr) = uploader.pending_to_get_register.pop() { + trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); + let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); + interface.submit_get_register_task( + uploader.client.clone(), + reg_addr, + task_result_sender.clone(), + ); + } } } // try to push register if we have enough buffer. 
         // No other checks for the same reason as the above step.
-        while !uploader.pending_to_push_register.is_empty()
-            && uploader.on_going_get_register.len() < uploader.cfg.batch_size
+        #[cfg(feature = "registers")]
         {
-            let upload_item = uploader.pop_item_for_push_register()?;
-            trace!(
-                "Conditions met for push registers {:?}",
-                upload_item.xorname()
-            );
-            let _ = uploader
-                .on_going_push_register
-                .insert(upload_item.xorname());
-            interface.submit_push_register_task(
-                uploader.client.clone(),
-                upload_item,
-                uploader.cfg.verify_store,
-                task_result_sender.clone(),
-            );
+            while !uploader.pending_to_push_register.is_empty()
+                && uploader.on_going_get_register.len() < uploader.cfg.batch_size
+            {
+                let upload_item = uploader.pop_item_for_push_register()?;
+                trace!(
+                    "Conditions met for push registers {:?}",
+                    upload_item.xorname()
+                );
+                let _ = uploader
+                    .on_going_push_register
+                    .insert(upload_item.xorname());
+                interface.submit_push_register_task(
+                    uploader.client.clone(),
+                    upload_item,
+                    uploader.cfg.verify_store,
+                    task_result_sender.clone(),
+                );
+            }
         }

         // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer.
@@ -278,6 +287,7 @@
             .ok_or(UploadError::InternalError)?;
         trace!("Received task result: {task_result:?}");
         match task_result {
+            #[cfg(feature = "registers")]
             TaskResult::GetRegisterFromNetworkOk { remote_register } => {
                 // if we got back the register, then merge & PUT it.
                 let xorname = remote_register.address().xorname();
@@ -295,6 +305,7 @@
                     uploader.pending_to_push_register.push(xorname);
                 }
             }
+            #[cfg(feature = "registers")]
             TaskResult::GetRegisterFromNetworkErr(xorname) => {
                 // then the register is a new one. It can follow the same flow as chunks now.
                 let _ = uploader.on_going_get_register.remove(&xorname);
                 let _ = uploader
                     .pending_to_get_store_cost
                     .push((xorname, GetStoreCostStrategy::Cheapest));
             }
@@ -303,6 +314,7 @@
+            #[cfg(feature = "registers")]
             TaskResult::PushRegisterOk { updated_register } => {
                 // push modifies the register, so we return this instead of the one from all_upload_items
                 let xorname = updated_register.address().xorname();
@@ -327,6 +339,7 @@
                 }
                 uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register));
             }
+            #[cfg(feature = "registers")]
             TaskResult::PushRegisterErr(xorname) => {
                 // the register failed to be Pushed. Retry until failure.
                 let _ = uploader.on_going_push_register.remove(&xorname);
@@ -366,7 +379,7 @@
                             address,
                         ));
                     }
-
+                    #[cfg(feature = "registers")]
                     UploadItem::Register { reg, .. } => {
                         if uploader.cfg.collect_registers {
                             let _ = uploader
@@ -477,6 +490,7 @@
                 UploadItem::Chunk { address, .. } => {
                     uploader.emit_upload_event(UploadEvent::ChunkUploaded(address));
                 }
+                #[cfg(feature = "registers")]
                 UploadItem::Register { reg, .. } => {
                     if uploader.cfg.collect_registers {
                         let _ = uploader
@@ -591,6 +605,7 @@ impl UploaderInterface for Uploader {
         });
     }

+    #[cfg(feature = "registers")]
     fn submit_get_register_task(
         &mut self,
         client: Client,
@@ -617,6 +632,7 @@
         });
     }

+    #[cfg(feature = "registers")]
     fn submit_push_register_task(
         &mut self,
         client: Client,
@@ -719,7 +735,9 @@ pub(super) struct InnerUploader {
     // states
     pub(super) all_upload_items: HashMap<XorName, UploadItem>,
+    #[cfg(feature = "registers")]
     pub(super) pending_to_get_register: Vec<RegisterAddress>,
+    #[cfg(feature = "registers")]
     pub(super) pending_to_push_register: Vec<XorName>,
     pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>,
     pub(super) pending_to_pay: Vec<(XorName, Box<PayeeQuote>)>,
@@ -727,7 +745,9 @@
     pub(super) payment_proofs: HashMap>,

     // trackers
+    #[cfg(feature = "registers")]
     pub(super) on_going_get_register: HashSet<XorName>,
+    #[cfg(feature = "registers")]
     pub(super) on_going_push_register: HashSet<XorName>,
     pub(super) on_going_get_cost: HashSet<XorName>,
     pub(super) on_going_payments: HashSet<XorName>,
@@ -735,6 +755,7 @@
     // error trackers
     pub(super) n_errors_during_uploads: HashMap<XorName, usize>,
+    #[cfg(feature = "registers")]
     pub(super) push_register_errors: usize,
     pub(super) get_store_cost_errors: usize,
     pub(super) make_payments_errors: usize,
@@ -744,6 +765,7 @@
     pub(super) tokens_spent: Amount,
     pub(super) upload_final_balance: Amount,
     pub(super) max_repayments_reached: HashSet<XorName>,
     pub(super) uploaded_addresses: HashSet<NetworkAddress>,
+    #[cfg(feature = "registers")]
     pub(super) uploaded_registers: HashMap<RegisterAddress, Register>,
     pub(super) uploaded_count: usize,
     pub(super) skipped_count: usize,
@@ -767,20 +789,25 @@
             wallet,

             all_upload_items: Default::default(),
+            #[cfg(feature = "registers")]
             pending_to_get_register: Default::default(),
+            #[cfg(feature = "registers")]
             pending_to_push_register: Default::default(),
             pending_to_get_store_cost: Default::default(),
             pending_to_pay: Default::default(),
             pending_to_upload: Default::default(),
             payment_proofs: Default::default(),
+            #[cfg(feature = "registers")]
             on_going_get_register: Default::default(),
+            #[cfg(feature = "registers")]
             on_going_push_register: Default::default(),
             on_going_get_cost: Default::default(),
             on_going_payments: Default::default(),
             on_going_uploads: Default::default(),

             n_errors_during_uploads: Default::default(),
+            #[cfg(feature = "registers")]
             push_register_errors: Default::default(),
             get_store_cost_errors: Default::default(),
             max_repayments_reached: Default::default(),
@@ -789,6 +816,7 @@
             tokens_spent: Amount::from(0),
             upload_final_balance: Amount::from(0),
             uploaded_addresses: Default::default(),
+            #[cfg(feature = "registers")]
             uploaded_registers: Default::default(),
             uploaded_count: Default::default(),
             skipped_count: Default::default(),
@@ -801,6 +829,7 @@
     // ====== Pop items ======

+    #[cfg(feature = "registers")]
     fn pop_item_for_push_register(&mut self) -> Result<UploadItem> {
         if let Some(name) = self.pending_to_push_register.pop() {
             let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
@@ -970,11 +999,13 @@
     // ====== Logic ======

+    #[cfg(feature = "registers")]
     async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result<Register> {
         let reg = client.register_get(reg_addr).await?;
         Ok(reg)
     }

+    #[cfg(feature = "registers")]
     async fn push_register(
         client: Client,
         upload_item: UploadItem,
@@ -1123,6 +1154,7 @@ impl InnerUploader {
             debug!("Client upload completed for chunk: {xorname:?}");
         }

+        #[cfg(feature = "registers")]
         UploadItem::Register { address: _, reg } => {
             debug!("Client upload started for register: {xorname:?}");
             let verification = if verify_store {

From de22bb7ba76dbbfbf352d0aee3b551d47c6e63e1 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 20:52:01 +0530
Subject: [PATCH 085/128] feat(ci): enable autonomi unit tests

---
 .github/workflows/merge.yml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index fa94260975..ee6b1f8033 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -110,6 +110,10 @@ jobs:
       - uses: Swatinem/rust-cache@v2

+      - name: Run autonomi tests
+        timeout-minutes: 25
+        run: cargo test --release --package autonomi --lib --features="full,fs"
+
       - name: Run node tests
         timeout-minutes: 25
         run: cargo test --release --package sn_node --lib
@@ -188,7 +192,7 @@
       # only these unit tests require a network, the rest are run above
       - name: Run autonomi --tests
-        run: cargo test --package autonomi --tests -- --nocapture
+        run: cargo test --package autonomi --features="full,fs" --tests -- --nocapture
         env:
           SN_LOG: "v"
           # only set the target dir for windows to bypass the linker issue.

From e7ee28013eb540cf461c3fb80dfbcabac29c71c4 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 21:09:00 +0530
Subject: [PATCH 086/128] feat(uploader): feature gate data

---
 autonomi/src/uploader/mod.rs    | 17 +++++++++----
 autonomi/src/uploader/upload.rs | 40 ++++++++++++++++++---------------
 2 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index 493dbefcbf..40dc6c7668 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -18,10 +18,9 @@
 use sn_evm::EvmWallet;
 use sn_evm::{Amount, EvmNetworkTokenError, ProofOfPayment};
 use sn_networking::target_arch::{mpsc, mpsc_channel};
 use sn_networking::{NetworkError, PayeeQuote};
-use sn_protocol::{
-    storage::{Chunk, ChunkAddress, RetryStrategy},
-    NetworkAddress,
-};
+#[cfg(feature = "data")]
+use sn_protocol::storage::{Chunk, ChunkAddress};
+use sn_protocol::{storage::RetryStrategy, NetworkAddress};
 #[cfg(feature = "registers")]
 use sn_registers::RegisterAddress;
 use std::{
@@ -176,13 +175,14 @@ impl UploadSummary {
 /// The events emitted from the upload process.
 pub enum UploadEvent {
     /// Uploaded a record to the network.
+    #[cfg(feature = "data")]
     ChunkUploaded(ChunkAddress),
     /// Uploaded a Register to the network.
     /// The returned register is just the passed in register.
     #[cfg(feature = "registers")]
     RegisterUploaded(Register),
-    ///
     /// The Chunk already exists in the network. No payments were made.
+    #[cfg(feature = "data")]
     ChunkAlreadyExistsInNetwork(ChunkAddress),
     /// The Register already exists in the network. The locally register changes were pushed to the network.
     /// No payments were made.
     /// The returned register contains the remote replica merged with the passed in register.
     #[cfg(feature = "registers")]
     RegisterUpdated(Register),
@@ -334,6 +334,7 @@ impl Uploader {
     }

     /// Insert a list of chunk paths to upload to upload.
+    #[cfg(feature = "fs")]
     pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) {
         self.inner
             .as_mut()
             .insert_chunk_paths(chunks);
     }

     /// Insert a list of chunks to upload to upload.
+    #[cfg(feature = "data")]
     pub fn insert_chunks(&mut self, chunks: impl IntoIterator) {
         self.inner
             .as_mut()
@@ -451,6 +453,7 @@
         rx
     }

+    #[cfg(feature = "fs")]
     pub(super) fn insert_chunk_paths(
         &mut self,
         chunks: impl IntoIterator,
     ) {
@@ -465,6 +468,7 @@
         }));
     }

+    #[cfg(feature = "data")]
     pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator) {
         self.all_upload_items
             .extend(chunks.into_iter().map(|chunk| {
@@ -490,6 +494,7 @@
 #[derive(Debug, Clone)]
 enum UploadItem {
+    #[cfg(feature = "data")]
     Chunk {
         address: ChunkAddress,
         // Either the actual chunk or the path to the chunk.
         chunk: Either,
     },
@@ -505,6 +510,7 @@ impl UploadItem {
     fn address(&self) -> NetworkAddress {
         match self {
+            #[cfg(feature = "data")]
             Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address),
             #[cfg(feature = "registers")]
             Self::Register { address, .. } => NetworkAddress::from_register_address(*address),
@@ -513,6 +519,7 @@
     fn xorname(&self) -> XorName {
         match self {
+            #[cfg(feature = "data")]
             UploadItem::Chunk { address, .. } => *address.xorname(),
             #[cfg(feature = "registers")]
             UploadItem::Register { address, .. } => address.xorname(),
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index c0e448a222..f12b493332 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -23,11 +23,9 @@
 use rand::{thread_rng, Rng};
 use sn_evm::{Amount, EvmWallet, ProofOfPayment};
 use sn_networking::target_arch::{mpsc, mpsc_channel, mpsc_recv, spawn};
 use sn_networking::{GetRecordCfg, PayeeQuote, PutRecordCfg, VerificationKind};
-use sn_protocol::{
-    messages::ChunkProof,
-    storage::{Chunk, RetryStrategy},
-    NetworkAddress,
-};
+#[cfg(feature = "data")]
+use sn_protocol::{messages::ChunkProof, storage::Chunk};
+use sn_protocol::{storage::RetryStrategy, NetworkAddress};
 #[cfg(feature = "registers")]
 use sn_registers::RegisterAddress;
 use std::{
@@ -91,17 +89,20 @@
     // chunks can be pushed to pending_get_store_cost directly
-    uploader.pending_to_get_store_cost = uploader
-        .all_upload_items
-        .iter()
-        .filter_map(|(xorname, item)| {
-            if let UploadItem::Chunk { .. } = item {
-                Some((*xorname, GetStoreCostStrategy::Cheapest))
-            } else {
-                None
-            }
-        })
-        .collect();
+    #[cfg(feature = "data")]
+    {
+        uploader.pending_to_get_store_cost = uploader
+            .all_upload_items
+            .iter()
+            .filter_map(|(xorname, item)| {
+                if let UploadItem::Chunk { .. } = item {
+                    Some((*xorname, GetStoreCostStrategy::Cheapest))
+                } else {
+                    None
+                }
+            })
+            .collect();
+    }

     // registers have to be verified + merged with remote replica, so we have to fetch it first.
     #[cfg(feature = "registers")]
@@ -374,6 +375,7 @@
                 // if during the first try we skip the item, then it is already present in the network.
                 match removed_item {
+                    #[cfg(feature = "data")]
                     UploadItem::Chunk { address, .. } => {
                         uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork(
                             address,
@@ -487,6 +489,7 @@
             let _ = uploader.uploaded_addresses.insert(removed_item.address());

             match removed_item {
+                #[cfg(feature = "data")]
                 UploadItem::Chunk { address, .. } => {
                     uploader.emit_upload_event(UploadEvent::ChunkUploaded(address));
                 }
@@ -974,9 +977,9 @@
                     }
                 }
             };
-            let pay_for_chunk_sender_clone = task_result_sender.clone();
+            let result_sender = task_result_sender.clone();
             let _handle = spawn(async move {
-                let _ = pay_for_chunk_sender_clone.send(result).await;
+                let _ = result_sender.send(result).await;
             });

             cost_map = HashMap::new();
@@ -1100,6 +1103,7 @@
         match upload_item {
+            #[cfg(feature = "data")]
             UploadItem::Chunk { address: _, chunk } => {
                 let chunk = match chunk {
                     Either::Left(chunk) => chunk,

From 40390bc0d902522ee6ea2eb077f94dec7727804f Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Fri, 18 Oct 2024 21:44:33 +0530
Subject: [PATCH 087/128] chore(uploader): add docs and small fixes

---
 autonomi/src/uploader/mod.rs    | 25 +++++++++++++++++++------
 autonomi/src/uploader/upload.rs | 15 ++++-----------
 autonomi/tests/fs.rs            |  2 +-
 3 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index 40dc6c7668..6c906a4b2a 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -6,6 +6,22 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.

+//! The uploader module provides an interface to upload data to the network, with the ability to retry failed uploads,
+//! make repayments for failed payments, and verify the data after it has been uploaded.
+//!
+//! # Example
+//! let mut uploader = Uploader::new(self.clone(), wallet.clone());
+//! uploader.insert_chunks(chunks);
+//! uploader.insert_chunks(vec![data_map_chunk]);
+//! let summary = uploader.start_upload().await?;
+//!
+//! # Configuration
+//! The `Uploader` can be configured using the `UploadCfg` struct. The most notable options are the `batch_size` and
+//! `payment_batch_size` which determine the number of data that are processed in parallel and the number of payments
+//! that are made in a single evm transaction, respectively.
+//! Also the `max_repayments_for_failed_data` option determines the maximum number of repayments to make if the
+//! initial payment fails.
+
 #[cfg(test)]
 mod tests;
 mod upload;
@@ -56,11 +72,8 @@ pub enum UploadError {
     InvalidCfg(String),
     #[error("I/O error: {0:?}")]
     Io(#[from] std::io::Error),
-    #[error("The upload failed with maximum repayments reached for multiple items: {items:?} Summary: {summary:?}")]
-    MaximumRepaymentsReached {
-        items: Vec<XorName>,
-        summary: UploadSummary,
-    },
+    #[error("The upload failed with maximum repayments reached for multiple items: {items:?}")]
+    MaximumRepaymentsReached { items: Vec<XorName> },
     #[error("Network error: {0:?}")]
     Network(#[from] NetworkError),
     #[cfg(feature = "registers")]
@@ -303,7 +316,7 @@ impl Uploader {
     /// Sets the maximum number of repayments to perform if the initial payment failed.
     /// NOTE: This creates an extra Spend and uses the wallet funds.
     ///
-    /// By default, this option is set to `1` retry.
+    /// By default, this option is set to `3` repayments.
     pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) {
         self.inner
             .as_mut()
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index f12b493332..ea1f812d70 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -6,9 +6,6 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.

-// TODO: fix
-#![allow(clippy::result_large_err)]
-
 use super::{
     GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary,
     Uploader, UploaderInterface, PAYMENT_BATCH_SIZE,
 };
@@ -56,12 +53,10 @@
 const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1;

 type Result<T> = std::result::Result<T, UploadError>;

 // TODO:
-// 1. since wallet balance is not fetched after finishing a task, get it before we send OK/Err
-// 2. Rework client event, it should be sent via the lowest level of the PUT. while for chunks it is done earlier (data.rs)
-// 3. track each batch with an id
-// 4. create a irrecoverable error type, so we can bail on io/serialization etc.
-// 5. separate cfgs/retries for register/chunk etc
-// 1. log whenever we insert/remove items. i.e., don't ignore values with `let _`
+// 1. track each batch with an id
+// 2. create a irrecoverable error type, so we can bail on io/serialization etc.
+// 3. separate cfgs/retries for register/chunk etc
+// 4. log whenever we insert/remove items. i.e., don't ignore values with `let _`

 /// The main loop that performs the upload process.
 /// An interface is passed here for easy testing.
@@ -156,7 +151,6 @@
             );
             return Err(UploadError::MaximumRepaymentsReached {
                 items: uploader.max_repayments_reached.into_iter().collect(),
-                summary,
             });
         }
@@ -1067,7 +1061,6 @@
                 // error is used by the caller.
                 return Err(UploadError::MaximumRepaymentsReached {
                     items: vec![xorname],
-                    summary: UploadSummary::default(),
                 });
             }
diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs
index b952852bc2..70787dee0f 100644
--- a/autonomi/tests/fs.rs
+++ b/autonomi/tests/fs.rs
@@ -97,7 +97,7 @@ async fn file_into_vault() -> Result<()> {
         .await?;

     // now assert over the stored account packet
-    let new_client = Client::connect(&[]).await?;
+    let new_client = Client::connect(&peers_from_env()?).await?;

     let (ap, got_version) = new_client.fetch_and_decrypt_vault(&client_sk).await?;
     assert_eq!(set_version, got_version);

From a46c666f72d5299012b3f46bcd284db2d6fec919 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Thu, 24 Oct 2024 15:36:12 +0530
Subject: [PATCH 088/128] chore: update based on comments

---
 autonomi/src/client/registers.rs |  2 +-
 autonomi/src/client/utils.rs     |  6 ++++--
 autonomi/src/uploader/mod.rs     | 14 ++++++++++----
 autonomi/src/uploader/upload.rs  | 17 +++++++----------
 4 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 76e3044730..a17bffb147 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -312,7 +312,7 @@ impl Client {
         let mut uploader = Uploader::new(self.clone(), wallet.clone());
         uploader.insert_register(vec![register]);
-        uploader.set_collect_registers(true);
+        uploader.collect_registers(true);

         let summary = uploader.start_upload().await?;
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index 4515adeca9..ec6515a78b 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -23,7 +23,7 @@ use sn_networking::{
 use sn_protocol::{
     messages::ChunkProof,
     storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy},
-    NetworkAddress,
+    NetworkAddress, CLOSE_GROUP_SIZE,
 };
 use std::{collections::HashMap, num::NonZero};
 use xor_name::XorName;
@@ -114,7 +114,9 @@ impl Client {
         } else {
             let verification = {
                 let verification_cfg = GetRecordCfg {
-                    get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")),
+                    get_quorum: Quorum::N(
+                        NonZero::new(CLOSE_GROUP_SIZE / 2).expect("2 is non-zero"),
+                    ),
                     retry_strategy: Some(RetryStrategy::Quick),
                     target_record: None,
                     expected_holders: Default::default(),
diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs
index 6c906a4b2a..cd5f7e4fc2 100644
--- a/autonomi/src/uploader/mod.rs
+++ b/autonomi/src/uploader/mod.rs
@@ -330,7 +330,7 @@ impl Uploader {
     ///
     /// By default, this option is set to `False`
     #[cfg(feature = "registers")]
-    pub fn set_collect_registers(&mut self, collect_registers: bool) {
+    pub fn collect_registers(&mut self, collect_registers: bool) {
         self.inner
             .as_mut()
@@ -346,7 +346,9 @@ impl Uploader {
             .get_event_receiver()
     }

-    /// Insert a list of chunk paths to upload to upload.
+    /// Insert a list of chunk paths into the uploader.
+    ///
+    /// Use `start_upload` to start the upload process.
     #[cfg(feature = "fs")]
     pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) {
         self.inner
@@ -355,7 +357,9 @@
             .insert_chunk_paths(chunks);
     }

-    /// Insert a list of chunks to upload to upload.
+    /// Insert a list of chunks into the uploader.
+    ///
+    /// Use `start_upload` to start the upload process.
     #[cfg(feature = "data")]
     pub fn insert_chunks(&mut self, chunks: impl IntoIterator) {
         self.inner
@@ -364,7 +368,9 @@
             .insert_chunks(chunks);
     }

-    /// Insert a list of registers to upload.
+    /// Insert a list of registers into the uploader. To get the updated registers, set `collect_registers` to true.
+    ///
+    /// Use `start_upload` to start the upload process.
     #[cfg(feature = "registers")]
     pub fn insert_register(&mut self, registers: impl IntoIterator) {
         self.inner
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
index ea1f812d70..dac748549c 100644
--- a/autonomi/src/uploader/upload.rs
+++ b/autonomi/src/uploader/upload.rs
@@ -133,8 +133,7 @@
             })?;
         }

-        #[cfg(test)]
-        trace!("UPLOADER STATE: finished uploading all items {uploader:?}");
+        debug!("UPLOADER STATE: finished uploading all items {uploader:?}");
         let summary = UploadSummary {
             storage_cost: uploader.tokens_spent,
             final_balance: uploader.upload_final_balance,
@@ -157,11 +156,11 @@
             return Ok(summary);
         }

-        // try to GET register if we have enough buffer.
-        // The results of the get & push register steps are used to fill up `pending_to_get_store` cost
-        // Since the get store cost list is the init state, we don't have to check if it is not full.
         #[cfg(feature = "registers")]
         {
+            // try to GET register if we have enough buffer.
+            // The results of the get & push register steps are used to fill up `pending_to_get_store` cost
+            // Since the get store cost list is the init state, we don't have to check if it is not full.
             while !uploader.pending_to_get_register.is_empty()
                 && uploader.on_going_get_register.len() < uploader.cfg.batch_size
             {
@@ -175,12 +174,10 @@
                     );
                 }
             }
-        }

-        // try to push register if we have enough buffer.
-        // No other checks for the same reason as the above step.
-        #[cfg(feature = "registers")]
-        {
+            // try to push register if we have enough buffer.
+            // No other checks for the same reason as the above step.
+            while !uploader.pending_to_push_register.is_empty()
                 && uploader.on_going_get_register.len() < uploader.cfg.batch_size
             {

From 19b847177486685aef87b32808acd7333c6e6e4c Mon Sep 17 00:00:00 2001
From: qima
Date: Fri, 25 Oct 2024 19:20:54 +0800
Subject: [PATCH 089/128] chore(network): logging multiple fetched multiple versions

---
 sn_networking/src/event/kad.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs
index 776d868e0d..de90a187d6 100644
--- a/sn_networking/src/event/kad.rs
+++ b/sn_networking/src/event/kad.rs
@@ -390,6 +390,7 @@ impl SwarmDriver {
                 // Insert the record and the peer into the result_map.
                 let record_content_hash = XorName::from_content(&peer_record.record.value);
+                debug!("For record {pretty_key:?} task {query_id:?}, received a copy {peer_id:?} with content hash {record_content_hash:?}");
                 let peer_list =
                     if let Entry::Occupied(mut entry) = result_map.entry(record_content_hash) {
@@ -410,7 +411,7 @@
                 let responded_peers = peer_list.len();

-                let expected_answers = get_quorum_value(&cfg.get_quorum);
+                let expected_answers = cfg.get_quorum;

                 trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far");
             } else {
                 // return error if the entry cannot be found
@@ -511,7 +512,7 @@
             // we have a split record, return it
             if num_of_versions > 1 {
-                warn!("RANGE: Multiple versions found over range");
+                warn!("RANGE: Multiple versions ({num_of_versions}) found over range");
                 for sender in senders {
                     sender
                         .send(Err(GetRecordError::SplitRecord {

From 9a243fd028658cf67f51bfd4984945040db0fd24 Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Fri, 25 Oct 2024 12:43:37 +0200
Subject: [PATCH 090/128] fix(launchpad): update status after resetting

---
 node-launchpad/src/components/status.rs | 27 +++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 3feb403485..a8f4adabee 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -46,6 +46,7 @@ use std::{
     time::{Duration, Instant},
     vec,
 };
+use strum::Display;
 use tokio::sync::mpsc::UnboundedSender;

 use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes};
@@ -105,7 +106,7 @@ pub struct Status<'a> {
     error_popup: Option,
 }

-#[derive(Clone)]
+#[derive(Clone, Display, Debug)]
 pub enum LockRegistryState {
     StartingNodes,
     StoppingNodes,
@@ -273,6 +274,14 @@ impl Status<'_> {
         Ok(())
     }

+    fn clear_node_items(&mut self) {
+        debug!("Cleaning items on Status page");
+        if let Some(items) = self.items.as_mut() {
+            items.items.clear();
+            debug!("Cleared the items on status page");
+        }
+    }
+
     /// Tries to trigger the update of node stats if the last update was more than `NODE_STAT_UPDATE_INTERVAL` ago.
     /// The result is sent via the StatusActions::NodesStatsObtained action.
     fn try_update_node_stats(&mut self, force_update: bool) -> Result<()> {
@@ -426,6 +435,7 @@ impl Component for Status<'_> {
             StatusActions::ResetNodesCompleted { trigger_start_node } => {
                 self.lock_registry = None;
                 self.load_node_registry_and_update_states()?;
+                self.clear_node_items();

                 if trigger_start_node {
                     debug!("Reset nodes completed. Triggering start nodes.");
@@ -515,7 +525,10 @@
                 }

                 if self.lock_registry.is_some() {
-                    error!("Registry is locked. Cannot start node now.");
+                    error!(
+                        "Registry is locked ({:?}) Cannot Start nodes now.",
+                        self.lock_registry
+                    );
                     return Ok(None);
                 }
@@ -549,7 +562,10 @@
             StatusActions::StopNodes => {
                 debug!("Got action to stop nodes");
                 if self.lock_registry.is_some() {
-                    error!("Registry is locked. Cannot stop node now.");
+                    error!(
+                        "Registry is locked ({:?}) Cannot Stop nodes now.",
+                        self.lock_registry
+                    );
                     return Ok(None);
                 }
@@ -572,7 +588,10 @@
             Action::OptionsActions(OptionsActions::ResetNodes) => {
                 debug!("Got action to reset nodes");
                 if self.lock_registry.is_some() {
-                    error!("Registry is locked. Cannot reset nodes now.");
+                    error!(
+                        "Registry is locked ({:?}) Cannot Reset nodes now.",
+                        self.lock_registry
+                    );
                     return Ok(None);
                 }

From 793f584e9a3ddf1cc7b43424e5746d7b91e20bcf Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Fri, 25 Oct 2024 15:25:05 +0200
Subject: [PATCH 091/128] fix(launchpad): rewards address req to start nodes

---
 node-launchpad/src/components/status.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 3feb403485..0e4e7a85b8 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -507,6 +507,13 @@
             StatusActions::StartNodes => {
                 debug!("Got action to start nodes");

+                if self.rewards_address.is_empty() {
+                    info!("Rewards address is not set. Ask for input.");
+                    return Ok(Some(Action::StatusActions(
+                        StatusActions::TriggerRewardsAddress,
+                    )));
+                }
+
                 if self.nodes_to_start == 0 {
                     info!("Nodes to start not set. Ask for input.");
                     return Ok(Some(Action::StatusActions(
@@ -720,7 +727,7 @@
         // No nodes. Empty Table.
         if let Some(ref items) = self.items {
-            if items.items.is_empty() {
+            if items.items.is_empty() || self.rewards_address.is_empty() {
                 let line1 = Line::from(vec![
                     Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)),
                     Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()),
@@ -833,7 +840,7 @@
         let footer = Footer::default();
         let footer_state = if let Some(ref items) = self.items {
-            if !items.items.is_empty() {
+            if !items.items.is_empty() || self.rewards_address.is_empty() {
                 if !self.get_running_nodes().is_empty() {
                     &mut NodesToStart::Running
                 } else {

From 4f02f66670ebdec75a44f95d592db23c9ecca752 Mon Sep 17 00:00:00 2001
From: Chris O'Neil
Date: Fri, 25 Oct 2024 17:46:48 +0100
Subject: [PATCH 092/128] feat: print rewards address in `status --details`

This is a utility for the user's reference.

The owner name field is removed from the output. It doesn't serve much
purpose without the Discord link.
---
 sn_node_manager/src/lib.rs | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs
index 721015ed2f..a71e7b6b4e 100644
--- a/sn_node_manager/src/lib.rs
+++ b/sn_node_manager/src/lib.rs
@@ -424,12 +424,7 @@ pub async fn status_report(
                 node.reward_balance
                     .map_or("-".to_string(), |b| b.to_string())
             );
-            println!(
-                "Owner: {}",
-                node.owner
-                    .as_ref()
-                    .map_or("-".to_string(), |o| o.to_string())
-            );
+            println!("Rewards address: {}", node.rewards_address);
             println!();
         }

From dc243ad7666ac555db8ad1ad34e5ed790dc3118b Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Fri, 25 Oct 2024 20:37:00 +0200
Subject: [PATCH 093/128] fix(launchpad): throbbler stops after a while

---
 node-launchpad/src/components/status.rs | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 3feb403485..fb4df4176d 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -179,8 +179,7 @@ impl Status<'_> {
                     // Update status based on current node status
                     item.status = match node_item.status {
                         ServiceStatus::Running => {
-                            // Call calc_next on the spinner state
-                            item.spinner_state.calc_next();
+                            NodeItem::update_spinner_state(&mut item.spinner_state);
                             NodeStatus::Running
                         }
                         ServiceStatus::Stopped => NodeStatus::Stopped,
@@ -190,7 +189,7 @@
                     // Starting is not part of ServiceStatus so we do it manually
                     if let Some(LockRegistryState::StartingNodes) = self.lock_registry {
-                        item.spinner_state.calc_next();
+                        NodeItem::update_spinner_state(&mut item.spinner_state);
                         item.status = NodeStatus::Starting;
                     }
@@ -1029,6 +1028,16 @@ pub struct NodeItem<'a> {
 }

 impl NodeItem<'_> {
+    fn update_spinner_state(state: &mut ThrobberState) {
+        // Call calc_next on the spinner state
+        // https://github.com/arkbig/throbber-widgets-tui/issues/19
+        if state.index() == i8::MAX {
+            *state = ThrobberState::default();
+        } else {
+            state.calc_next();
+        }
+    }
+
     fn render_as_row(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row {
         let mut row_style = Style::default().fg(GHOST_WHITE);
         let mut spinner_state = self.spinner_state.clone();
@@ -1100,7 +1109,7 @@
             ),
             self.status.to_string(),
         ];
-        let throbber_area = Rect::new(area.width - 2, area.y + 2 + index as u16, 1, 1);
+        let throbber_area = Rect::new(area.width - 3, area.y + 2 + index as u16, 1, 1);

         f.render_stateful_widget(self.spinner.clone(), throbber_area, &mut spinner_state);

From 6184db98f2fd299ec0954984f5698de0c5878c2d Mon Sep 17 00:00:00 2001
From: Chris O'Neil
Date: Fri, 25 Oct 2024 22:41:23 +0100
Subject: [PATCH 094/128] chore(release): release candidate 2024.10.4.1

==================
Crate Versions
==================
autonomi: 0.2.2-rc.1
autonomi-cli: 0.1.3-rc.1
evmlib: 0.1.2-rc.1
evm_testnet: 0.1.2-rc.1
sn_build_info: 0.1.17-rc.1
sn_evm: 0.1.2-rc.1
sn_logging: 0.2.38-rc.1
sn_metrics: 0.1.18-rc.1
nat-detection: 0.2.9-rc.1
sn_networking: 0.19.1-rc.1
sn_node: 0.112.2-rc.1
node-launchpad: 0.4.2-rc.1
sn_node_manager: 0.11.1-rc.1
sn_node_rpc_client: 0.6.33-rc.1
sn_peers_acquisition: 0.5.5-rc.1
sn_protocol: 0.17.13-rc.1
sn_registers: 0.4.1-rc.1
sn_service_management: 0.4.1-rc.1
sn_transfers: 0.20.1-rc.1
test_utils: 0.4.9-rc.1
token_supplies: 0.1.56-rc.1

===================
Binary Versions
===================
nat-detection: 0.2.9-rc.1
node-launchpad: 0.4.2-rc.1
autonomi: 0.1.3-rc.1
safenode: 0.112.2-rc.1
safenode-manager: 0.11.1-rc.1
safenode_rpc_client: 0.6.33-rc.1
safenodemand: 0.11.1-rc.1

---
 Cargo.lock                        | 42 +++++++++++++++----------------
 autonomi-cli/Cargo.toml           | 12 ++++-----
 autonomi/Cargo.toml               | 20 +++++++--------
 evm_testnet/Cargo.toml            |  6 ++---
 evmlib/Cargo.toml                 |  2 +-
 nat-detection/Cargo.toml          |  8 +++---
 node-launchpad/Cargo.toml         | 14 +++++------
 release-cycle-info                |  4 +--
 sn_build_info/Cargo.toml          |  2 +-
 sn_build_info/src/release_info.rs |  5 ++--
 sn_evm/Cargo.toml                 |  4 +--
 sn_logging/Cargo.toml             |  2 +-
 sn_metrics/Cargo.toml             |  2 +-
 sn_networking/Cargo.toml          | 12 ++++-----
 sn_node/Cargo.toml                | 28 ++++++++++-----------
 sn_node_manager/Cargo.toml        | 16 ++++++------
 sn_node_rpc_client/Cargo.toml     | 16 ++++++------
 sn_peers_acquisition/Cargo.toml   |  4 +--
 sn_protocol/Cargo.toml            | 10 ++++----
 sn_registers/Cargo.toml           |  2 +-
 sn_service_management/Cargo.toml  |  8 +++---
 sn_transfers/Cargo.toml           |  2 +-
 test_utils/Cargo.toml             |  6 ++---
 token_supplies/Cargo.toml         |  2 +-
 24 files changed, 114 insertions(+), 115 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a9e73bd939..9720dd2245 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1088,7 +1088,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"

 [[package]]
 name = "autonomi"
-version = "0.2.1"
+version = "0.2.2-rc.1"
 dependencies = [
  "alloy",
  "assert_matches",
@@ -1139,7 +1139,7 @@ dependencies = [

 [[package]]
 name = "autonomi-cli"
-version = "0.1.2"
+version = "0.1.3-rc.1"
 dependencies = [
  "autonomi",
  "clap",
@@ -2792,7 +2792,7 @@ dependencies = [

 [[package]]
 name = "evm_testnet"
-version = "0.1.1"
+version = "0.1.2-rc.1"
 dependencies = [
  "clap",
  "dirs-next",
@@ -2803,7 +2803,7 @@ dependencies = [

 [[package]]
 name = "evmlib"
-version = "0.1.1"
+version = "0.1.2-rc.1"
 dependencies = [
  "alloy",
  "dirs-next",
@@ -5613,7 +5613,7 @@ dependencies = [

 [[package]]
 name = "nat-detection"
-version = "0.2.8"
+version = "0.2.9-rc.1"
 dependencies = [
  "clap",
  "clap-verbosity-flag",
@@ -5730,7 +5730,7 @@ dependencies = [

 [[package]]
 name = "node-launchpad"
-version = "0.4.1"
+version = "0.4.2-rc.1"
 dependencies = [
  "atty",
  "better-panic",
@@ -8095,7 +8095,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"

 [[package]]
 name = "sn-node-manager"
-version = "0.11.0"
+version = "0.11.1-rc.1"
 dependencies = [
  "assert_cmd",
  "assert_fs",
@@ -8171,7 +8171,7 @@ dependencies = [

 [[package]]
 name = "sn_build_info"
-version = "0.1.16"
+version = "0.1.17-rc.1"
 dependencies = [
  "chrono",
  "tracing",
@@ -8213,7 +8213,7 @@ dependencies = [

 [[package]]
 name = "sn_evm"
-version = "0.1.1"
+version = "0.1.2-rc.1"
 dependencies = [
  "custom_debug",
  "evmlib",
@@ -8236,7 +8236,7 @@ dependencies = [

 [[package]]
 name = "sn_logging"
-version = "0.2.37"
+version = "0.2.38-rc.1"
 dependencies = [
  "chrono",
  "color-eyre",
@@ -8261,7 +8261,7 @@ dependencies = [

 [[package]]
 name = "sn_metrics"
-version = "0.1.17"
+version = "0.1.18-rc.1"
 dependencies = [
  "clap",
  "color-eyre",
@@ -8275,7 +8275,7 @@ dependencies = [

 [[package]]
 name = "sn_networking"
-version = "0.19.0"
+version = "0.19.1-rc.1"
 dependencies = [
  "aes-gcm-siv",
  "async-channel",
@@ -8321,7 +8321,7 @@ dependencies = [

 [[package]]
 name = "sn_node"
-version = "0.112.1"
+version = "0.112.2-rc.1"
 dependencies = [
  "assert_fs",
  "async-trait",
@@ -8378,7 +8378,7 @@ dependencies = [

 [[package]]
 name = "sn_node_rpc_client"
-version = "0.6.32"
+version = "0.6.33-rc.1"
 dependencies = [
  "assert_fs",
  "async-trait",
@@ -8405,7 +8405,7 @@ dependencies = [

 [[package]]
 name = "sn_peers_acquisition"
-version = "0.5.4" +version = "0.5.5-rc.1" dependencies = [ "clap", "lazy_static", @@ -8421,7 +8421,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.12" +version = "0.17.13-rc.1" dependencies = [ "blsttc", "bytes", @@ -8451,7 +8451,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.0" +version = "0.4.1-rc.1" dependencies = [ "blsttc", "crdts", @@ -8468,7 +8468,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.0" +version = "0.4.1-rc.1" dependencies = [ "async-trait", "dirs-next", @@ -8494,7 +8494,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.0" +version = "0.20.1-rc.1" dependencies = [ "assert_fs", "blsttc", @@ -8827,7 +8827,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.8" +version = "0.4.9-rc.1" dependencies = [ "bytes", "color-eyre", @@ -8971,7 +8971,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.55" +version = "0.1.56-rc.1" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 7e71a4a841..0fb8b720f3 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "autonomi-cli" description = "Autonomi CLI" license = "GPL-3.0" -version = "0.1.2" +version = "0.1.3-rc.1" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -24,7 +24,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = [ "data", "fs", "vault", @@ -45,15 +45,15 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } walkdir = "2.5.0" serde_json = "1.0.132" serde = "1.0.210" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 2213775d78..35aacea5f9 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.2.1" +version = "0.2.2-rc.1" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -40,11 +40,11 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.19.0" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_protocol = { version = "0.17.12", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.4.0" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_networking = { path = "../sn_networking", version = "0.19.1-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", 
version = "0.5.5-rc.1" } +sn_protocol = { version = "0.17.13-rc.1", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -63,9 +63,9 @@ alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwe assert_matches = "1.5.0" eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.37" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_registers = { path = "../sn_registers", version = "0.4.0", features = ["test-utils"] } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1", features = ["test-utils"] } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. @@ -76,7 +76,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.1", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.2-rc.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index 6712604130..e493b1ad79 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1" +version = "0.1.2-rc.1" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } +evmlib = { path = "../evmlib", version = "0.1.2-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 31b4b0da5e..77079acd19 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1" +version = "0.1.2-rc.1" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index bbab570e94..f0f1c5867b 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.8" +version = "0.2.9-rc.1" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_networking = { path = "../sn_networking", version = "0.19.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } +sn_networking = { path = 
"../sn_networking", version = "0.19.1-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 51f21050fe..21c1dba24f 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.4.1" +version = "0.4.2-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.16" } -sn_evm = { path = "../sn_evm", version = "0.1.1" } -sn-node-manager = { version = "0.11.0", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.4", path = "../sn_peers_acquisition" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn-node-manager = { version = "0.11.1-rc.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.5-rc.1", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.0", path = "../sn_service_management" } +sn_service_management = { version = "0.4.1-rc.1", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index bdcd486143..96648b9415 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -14,5 +14,5 @@ # number for all the released binaries. 
 release-year: 2024
 release-month: 10
-release-cycle: 3
-release-cycle-counter: 3
+release-cycle: 4
+release-cycle-counter: 1
diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml
index 8c5c4b7dfa..101b48ae32 100644
--- a/sn_build_info/Cargo.toml
+++ b/sn_build_info/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_build_info"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.16"
+version = "0.1.17-rc.1"
 build = "build.rs"
 include = ["Cargo.toml", "src/**/*", "build.rs"]
diff --git a/sn_build_info/src/release_info.rs b/sn_build_info/src/release_info.rs
index 3506c9cc96..8872025ec1 100644
--- a/sn_build_info/src/release_info.rs
+++ b/sn_build_info/src/release_info.rs
@@ -1,5 +1,4 @@
 pub const RELEASE_YEAR: &str = "2024";
 pub const RELEASE_MONTH: &str = "10";
-pub const RELEASE_CYCLE: &str = "3";
-pub const RELEASE_CYCLE_COUNTER: &str = "3";
-
+pub const RELEASE_CYCLE: &str = "4";
+pub const RELEASE_CYCLE_COUNTER: &str = "1";
diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml
index 1d0d7686e0..98e05e2973 100644
--- a/sn_evm/Cargo.toml
+++ b/sn_evm/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_evm"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.1"
+version = "0.1.2-rc.1"

 [features]
 test-utils = []
 external-signer = ["evmlib/external-signer"]

 [dependencies]
 custom_debug = "~0.6.1"
-evmlib = { path = "../evmlib", version = "0.1.1" }
+evmlib = { path = "../evmlib", version = "0.1.2-rc.1" }
 hex = "~0.4.3"
 lazy_static = "~1.4.0"
 libp2p = { version = "0.53", features = ["identify", "kad"] }
diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml
index 090e3f8a12..b214d63c1b 100644
--- a/sn_logging/Cargo.toml
+++ b/sn_logging/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_logging"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.2.37"
+version = "0.2.38-rc.1"

 [dependencies]
 chrono = "~0.4.19"
diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml
index 4a550a58a8..8c166b1228 100644
--- a/sn_metrics/Cargo.toml
+++ b/sn_metrics/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_metrics"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.17"
+version = "0.1.18-rc.1"

 [[bin]]
 path = "src/main.rs"
diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml
index 9c76065bf0..49825bab92 100644
--- a/sn_networking/Cargo.toml
+++ b/sn_networking/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_networking"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.19.0"
+version = "0.19.1-rc.1"

 [features]
 default = []
@@ -54,11 +54,11 @@
 rayon = "1.8.0"
 rmp-serde = "1.1.1"
 self_encryption = "~0.30.0"
 serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12" }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0" }
-sn_registers = { path = "../sn_registers", version = "0.4.0" }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" }
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" }
+sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" }
 sysinfo = { version = "0.30.8", default-features = false, optional = true }
 thiserror = "1.0.23"
 tiny-keccak = { version = "~2.0.2", features = ["sha3"] }
diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml
index e79997633a..56281251a8 100644
--- a/sn_node/Cargo.toml
+++ b/sn_node/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["MaidSafe Developers "]
 description = "Safe Node"
 name = "sn_node"
-version = "0.112.1"
+version = "0.112.2-rc.1"
 edition = "2021"
 license = "GPL-3.0"
 homepage = "https://maidsafe.net"
@@ -52,15 +52,15 @@
 rmp-serde = "1.1.1"
 rayon = "1.8.0"
 self_encryption = "~0.30.0"
 serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_networking = { path = "../sn_networking", version = "0.19.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12" }
-sn_registers = { path = "../sn_registers", version = "0.4.0" }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0" }
-sn_service_management = { path = "../sn_service_management", version = "0.4.0" }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" }
+sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" }
+sn_networking = { path = "../sn_networking", version = "0.19.1-rc.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" }
+sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" }
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" }
+sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" }
 sysinfo = { version = "0.30.8", default-features = false }
 thiserror = "1.0.23"
 tokio = { version = "1.32.0", features = [
@@ -83,16 +83,16 @@
 strum = { version = "0.26.2", features = ["derive"] }
 color-eyre = "0.6.2"

 [dev-dependencies]
-evmlib = { path = "../evmlib", version = "0.1.1" }
-autonomi = { path = "../autonomi", version = "0.2.1", features = ["registers"] }
+evmlib = { path = "../evmlib", version = "0.1.2-rc.1" }
+autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = ["registers"] }
 reqwest = { version = "0.12.2", default-features = false, features = [
     "rustls-tls-manual-roots",
 ] }
 serde_json = "1.0"
-sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features = [
     "rpc",
 ] }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0", features = [
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1", features = [
     "test-utils",
 ] }
 tempfile = "3.6.0"
diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml
index 4b152994c4..b76b2111bb 100644
--- a/sn_node_manager/Cargo.toml
+++ b/sn_node_manager/Cargo.toml
@@ -7,7 +7,7 @@
 license = "GPL-3.0"
 name = "sn-node-manager"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.11.0"
+version = "0.11.1-rc.1"

 [[bin]]
 name = "safenode-manager"
@@ -46,14 +46,14 @@
 semver = "1.0.20"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 service-manager = "0.7.0"
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12" }
-sn_service_management = { path = "../sn_service_management", version = "0.4.0" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" }
+sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" }
+sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" }
 sn-releases = "0.2.6"
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0" }
+sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" }
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" }
 sysinfo = "0.30.12"
 thiserror = "1.0.23"
 tokio = { version = "1.26", features = ["full"] }
diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml
index 3b4fd86564..9c91a08764 100644
--- a/sn_node_rpc_client/Cargo.toml
+++ b/sn_node_rpc_client/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_node_rpc_client"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.6.32"
+version = "0.6.33-rc.1"

 [[bin]]
 name = "safenode_rpc_client"
@@ -26,13 +26,13 @@
 color-eyre = "0.6.2"
 hex = "~0.4.3"
 libp2p = { version = "0.54.1", features = ["kad"]}
 libp2p-identity = { version="0.2.7", features = ["rand"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_node = { path = "../sn_node", version = "0.112.1" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12", features=["rpc"] }
-sn_service_management = { path = "../sn_service_management", version = "0.4.0" }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" }
+sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" }
+sn_node = { path = "../sn_node", version = "0.112.2-rc.1" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features=["rpc"] }
+sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" }
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" }
 thiserror = "1.0.23"
 # # watch out updating this, protoc compiler needs to be installed on all build systems
 # # arm builds + musl are very problematic
diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml
index c8e46ee8be..7670798cc5 100644
--- a/sn_peers_acquisition/Cargo.toml
+++ b/sn_peers_acquisition/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_peers_acquisition"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.5.4"
+version = "0.5.5-rc.1"

 [features]
 local = []
@@ -21,7 +21,7 @@
 lazy_static = "~1.4.0"
 libp2p = { version = "0.54.1", features = [] }
 rand = "0.8.5"
 reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12", optional = true}
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", optional = true}
 thiserror = "1.0.23"
 tokio = { version = "1.32.0", default-features = false }
 tracing = { version = "~0.1.26" }
diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml
index 622ed3dd4d..82d8827b02 100644
--- a/sn_protocol/Cargo.toml
+++ b/sn_protocol/Cargo.toml
@@ -7,7 +7,7 @@
 license = "GPL-3.0"
 name = "sn_protocol"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.17.12"
+version = "0.17.13-rc.1"

 [features]
 default = []
@@ -28,10 +28,10 @@
 rmp-serde = "1.1.1"
 serde = { version = "1.0.133", features = [ "derive", "rc" ]}
 serde_json = "1.0"
 sha2 = "0.10.7"
-sn_build_info = { path = "../sn_build_info", version = "0.1.16" }
-sn_transfers = { path = "../sn_transfers", version = "0.20.0" }
-sn_registers = { path = "../sn_registers", version = "0.4.0" }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" }
+sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" }
+sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" }
 thiserror = "1.0.23"
 tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] }
 tracing = { version = "~0.1.26" }
diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml
index fd68714064..3446345669 100644
--- a/sn_registers/Cargo.toml
+++ b/sn_registers/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_registers"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.4.0"
+version = "0.4.1-rc.1"

 [features]
 test-utils = []
diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml
index b0f60bc453..66ca3a2e26 100644
--- a/sn_service_management/Cargo.toml
+++ b/sn_service_management/Cargo.toml
@@ -7,7 +7,7 @@
 license = "GPL-3.0"
 name = "sn_service_management"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.4.0"
+version = "0.4.1-rc.1"

 [dependencies]
 async-trait = "0.1"
@@ -19,11 +19,11 @@
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 semver = "1.0.20"
 service-manager = "0.7.0"
-sn_logging = { path = "../sn_logging", version = "0.2.37" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [
+sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features = [
     "rpc",
 ] }
-sn_evm = { path = "../sn_evm", version = "0.1.1" }
+sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" }
 sysinfo = "0.30.12"
 thiserror = "1.0.23"
 tokio = { version = "1.32.0", features = ["time"] }
diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml
index cbd6206fba..ded837cef0 100644
--- a/sn_transfers/Cargo.toml
+++ b/sn_transfers/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "sn_transfers"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.20.0"
+version = "0.20.1-rc.1"

 [features]
 reward-forward = []
diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml
index 48955e7e8c..94dadc7e4a 100644
--- a/test_utils/Cargo.toml
+++ b/test_utils/Cargo.toml
@@ -7,7 +7,7 @@
 license = "GPL-3.0"
 name = "test_utils"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.4.8"
+version = "0.4.9-rc.1"

 [features]
 local = ["sn_peers_acquisition/local"]

 [dependencies]
 bytes = { version = "1.0.1", features = ["serde"] }
 color-eyre = "~0.6.2"
 dirs-next = "~2.0.0"
-evmlib = { path = "../evmlib", version = "0.1.1" }
+evmlib = { path = "../evmlib", version = "0.1.2-rc.1" }
 libp2p = { version = "0.54.1", features = ["identify", "kad"] }
 rand = "0.8.5"
 serde = { version = "1.0.133", features = ["derive"] }
 serde_json = "1.0"
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" }
diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml
index 22cdd87d1c..81506b505e 100644
--- a/token_supplies/Cargo.toml
+++ b/token_supplies/Cargo.toml
@@ -8,7 +8,7 @@
 license = "GPL-3.0"
 name = "token_supplies"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.55"
+version = "0.1.56-rc.1"

 [dependencies]

From f5400f37c1e2814f06288f092aae1be3739ce0a2 Mon Sep 17 00:00:00 2001
From: Chris O'Neil
Date: Fri, 25 Oct 2024 22:45:11 +0100
Subject: [PATCH 095/128] chore: stop processing faucet and auditor binaries

The util script for generating the release description should no longer
process the faucet and auditor binaries. The `sn_cli/safe` crate/binary
combination is now replaced with `autonomi-cli/autonomi`.

---
 resources/scripts/release-candidate-description.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/resources/scripts/release-candidate-description.py b/resources/scripts/release-candidate-description.py
index bc9aa8547d..c288bc13fb 100755
--- a/resources/scripts/release-candidate-description.py
+++ b/resources/scripts/release-candidate-description.py
@@ -70,13 +70,11 @@ def get_pr_list(pr_numbers):

 def main(pr_numbers):
     crate_binary_map = {
-        "sn_faucet": "faucet",
         "nat-detection": "nat-detection",
         "node-launchpad": "node-launchpad",
-        "sn_cli": "safe",
+        "autonomi-cli": "autonomi",
         "sn_node": "safenode",
         "sn_node_manager": "safenode-manager",
-        "sn_auditor": "sn_auditor",
     }

     markdown_doc = []
@@ -103,4 +101,4 @@ def main(pr_numbers):
         sys.exit(1)

     file_path = sys.argv[1]
-    main(read_pr_numbers(file_path))
\ No newline at end of file
+    main(read_pr_numbers(file_path))

From f37b4d3ce5e29bd223b3addf2156d6144d2963d4 Mon Sep 17 00:00:00 2001
From: Anselme
Date: Mon, 28 Oct 2024 09:15:30 +0100
Subject: [PATCH 096/128] Revert "feat(autonomi): allow the uploader to work
 with the new apis"

---
 .github/workflows/merge.yml             |    6 +-
 Cargo.lock                              |   17 -
 autonomi-cli/src/utils.rs               |   41 +-
 autonomi/Cargo.toml                     |    5 -
 autonomi/src/client/data.rs             |   78 +-
 autonomi/src/client/data_private.rs     |   70 +-
 autonomi/src/client/external_signer.rs  |   25 +-
 autonomi/src/client/fs.rs               |   12 +-
 autonomi/src/client/fs_private.rs       |   10 +-
 autonomi/src/client/mod.rs              |   14 +-
 autonomi/src/client/registers.rs        |  226 ++---
 autonomi/src/client/utils.rs            |   96 +-
 autonomi/src/lib.rs                     |    2 -
 autonomi/src/uploader/mod.rs            |  590 -----------
 autonomi/src/uploader/tests/mod.rs      |  557 ----------
 autonomi/src/uploader/tests/setup.rs    |  480 ---------
 autonomi/src/uploader/upload.rs         | 1232 -----------------------
 autonomi/src/utils.rs                   |    7 +-
 autonomi/tests/fs.rs                    |    2 +-
 sn_evm/src/lib.rs                       |    1 -
 sn_networking/Cargo.toml                |    1 -
 sn_networking/src/target_arch.rs        |   25 +-
 sn_protocol/src/messages/chunk_proof.rs |    9 -
 23 files changed, 318 insertions(+), 3188 deletions(-)
 delete mode 100644 autonomi/src/uploader/mod.rs
 delete mode 100644 autonomi/src/uploader/tests/mod.rs
 delete mode 100644 autonomi/src/uploader/tests/setup.rs
 delete mode 100644 autonomi/src/uploader/upload.rs

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 720a2f7e25..d639924585 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -110,10 +110,6 @@ jobs:
       - uses: Swatinem/rust-cache@v2

-      - name: Run autonomi tests
-        timeout-minutes: 25
-        run: cargo test --release --package autonomi --lib --features="full,fs"
-
       - name: Run node tests
         timeout-minutes: 25
         run: cargo test --release --package sn_node --lib
@@ -192,7 +188,7 @@
       # only these unit tests require a network, the rest are run above in unit test section
       - name: Run autonomi --tests
-        run: cargo test --package autonomi --features="full,fs" --tests -- --nocapture
+        run: cargo test --package autonomi --tests -- --nocapture
         env:
           SN_LOG: "v"
           # only set the target dir for windows to bypass the linker issue.
diff --git a/Cargo.lock b/Cargo.lock
index a9e73bd939..d274255dbc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -950,18 +950,6 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"

-[[package]]
-name = "async-channel"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
-dependencies = [
- "concurrent-queue",
- "event-listener-strategy",
- "futures-core",
- "pin-project-lite",
-]
-
 [[package]]
 name = "async-io"
 version = "2.3.4"
@@ -1091,7 +1079,6 @@
 name = "autonomi"
 version = "0.2.1"
 dependencies = [
  "alloy",
- "assert_matches",
  "bip39",
  "blst",
  "blstrs 0.7.1",
@@ -1099,13 +1086,11 @@ dependencies = [
  "bytes",
  "console_error_panic_hook",
  "const-hex",
- "custom_debug",
  "evmlib",
  "eyre",
  "futures",
  "hex 0.4.3",
  "instant",
- "itertools 0.12.1",
  "js-sys",
  "libp2p 0.54.1",
  "rand 0.8.5",
@@ -1122,7 +1107,6 @@ dependencies = [
  "sn_peers_acquisition",
  "sn_protocol",
  "sn_registers",
- "tempfile",
  "test_utils",
  "thiserror",
  "tiny_http",
@@ -8278,7 +8262,6 @@
 name = "sn_networking"
 version = "0.19.0"
 dependencies = [
  "aes-gcm-siv",
- "async-channel",
  "async-trait",
  "backoff",
  "blsttc",
diff --git a/autonomi-cli/src/utils.rs b/autonomi-cli/src/utils.rs
index 80c46150ad..5f031a3c24 100644
--- a/autonomi-cli/src/utils.rs
+++ b/autonomi-cli/src/utils.rs
@@ -6,40 +6,28 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.

-use autonomi::client::{Amount, ClientEvent};
-
-/// Summary of the upload operation.
-#[derive(Debug, Clone)]
-pub struct CliUploadSummary {
-    /// Total tokens spent during the upload.
-    pub tokens_spent: Amount,
-    /// Total number of records uploaded.
-    pub record_count: usize,
-}
+use autonomi::client::{Amount, ClientEvent, UploadSummary};

 /// Collects upload summary from the event receiver.
 /// Send a signal to the returned sender to stop collecting and to return the result via the join handle.
 pub fn collect_upload_summary(
     mut event_receiver: tokio::sync::mpsc::Receiver<ClientEvent>,
 ) -> (
-    tokio::task::JoinHandle<CliUploadSummary>,
+    tokio::task::JoinHandle<UploadSummary>,
     tokio::sync::oneshot::Sender<()>,
 ) {
     let (upload_completed_tx, mut upload_completed_rx) = tokio::sync::oneshot::channel::<()>();
     let stats_thread = tokio::spawn(async move {
-        let mut tokens: Amount = Amount::from(0);
-        let mut records = 0;
+        let mut tokens_spent: Amount = Amount::from(0);
+        let mut record_count = 0;

         loop {
             tokio::select!
{ event = event_receiver.recv() => { match event { - Some(ClientEvent::UploadComplete { - tokens_spent, - record_count - }) => { - tokens += tokens_spent; - records += record_count; + Some(ClientEvent::UploadComplete(upload_summary)) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; } None => break, } @@ -51,19 +39,16 @@ pub fn collect_upload_summary( // try to drain the event receiver in case there are any more events while let Ok(event) = event_receiver.try_recv() { match event { - ClientEvent::UploadComplete { - tokens_spent, - record_count, - } => { - tokens += tokens_spent; - records += record_count; + ClientEvent::UploadComplete(upload_summary) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; } } } - CliUploadSummary { - tokens_spent: tokens, - record_count: records, + UploadSummary { + tokens_spent, + record_count, } }); diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 2213775d78..3da273183e 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -30,11 +30,9 @@ bytes = { version = "1.0.1", features = ["serde"] } curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ "num-bigint", ] } -custom_debug = "~0.6.1" eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } const-hex = "1.12.0" hex = "~0.4.3" -itertools = "~0.12.1" libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" @@ -60,13 +58,10 @@ blstrs = "0.7.1" [dev-dependencies] alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } -assert_matches = "1.5.0" eyre = "0.6.5" sha2 = "0.10.6" sn_logging = { path = "../sn_logging", version = "0.2.37" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } -sn_registers = { path = "../sn_registers", version = "0.4.0", features = ["test-utils"] } -tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. test_utils = { path = "../test_utils" } diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 542dea1f0b..869022cd37 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -6,11 +6,15 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
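Note on the restored collect_upload_summary in autonomi-cli/src/utils.rs above: it pairs a tokio::select! loop with a oneshot shutdown channel, then does a final try_recv drain so events queued before the stop signal are still counted. A minimal, self-contained sketch of that pattern, with hypothetical Event/Summary stand-ins for the real ClientEvent/UploadSummary types:

use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;

#[derive(Debug)]
enum Event {
    UploadComplete { records: usize },
}

#[derive(Debug, Default)]
struct Summary {
    records: usize,
}

// Collect events until the caller fires the returned oneshot, then drain and return.
fn collect(mut events: mpsc::Receiver<Event>) -> (JoinHandle<Summary>, oneshot::Sender<()>) {
    let (stop_tx, mut stop_rx) = oneshot::channel::<()>();
    let handle = tokio::spawn(async move {
        let mut summary = Summary::default();
        loop {
            tokio::select! {
                event = events.recv() => {
                    match event {
                        Some(Event::UploadComplete { records }) => summary.records += records,
                        None => break, // all senders dropped
                    }
                },
                _ = &mut stop_rx => break, // caller signalled completion
            }
        }
        // Drain anything queued before the stop signal arrived.
        while let Ok(Event::UploadComplete { records }) = events.try_recv() {
            summary.records += records;
        }
        summary
    });
    (handle, stop_tx)
}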
-use crate::client::ClientEvent; -use crate::uploader::{UploadError, Uploader}; -use crate::{self_encryption::encrypt, Client}; use bytes::Bytes; use libp2p::kad::Quorum; +use tokio::task::{JoinError, JoinSet}; + +use std::collections::HashSet; +use xor_name::XorName; + +use crate::client::{ClientEvent, UploadSummary}; +use crate::{self_encryption::encrypt, Client}; use sn_evm::{Amount, AttoTokens}; use sn_evm::{EvmWallet, EvmWalletError}; use sn_networking::{GetRecordCfg, NetworkError}; @@ -18,8 +22,6 @@ use sn_protocol::{ storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}, NetworkAddress, }; -use std::collections::HashSet; -use xor_name::XorName; /// Raw Data Address (points to a DataMap) pub type DataAddr = XorName; @@ -39,14 +41,14 @@ pub enum PutError { PayError(#[from] PayError), #[error("Serialization error: {0}")] Serialization(String), - #[error("Upload Error")] - Upload(#[from] UploadError), #[error("A wallet error occurred.")] Wallet(#[from] sn_evm::EvmError), #[error("The vault owner key does not match the client's public key")] VaultBadOwner, #[error("Payment unexpectedly invalid for {0:?}")] PaymentUnexpectedlyInvalid(NetworkAddress), + #[error("Could not simultaneously upload chunks: {0:?}")] + JoinError(tokio::task::JoinError), } /// Errors that can occur during the pay operation. @@ -78,6 +80,8 @@ pub enum GetError { /// Errors that can occur during the cost calculation. #[derive(Debug, thiserror::Error)] pub enum CostError { + #[error("Could not simultaneously fetch store costs: {0:?}")] + JoinError(JoinError), #[error("Failed to self-encrypt data.")] SelfEncryption(#[from] crate::self_encryption::Error), #[error("Could not get store quote for: {0:?} after several retries")] @@ -114,24 +118,62 @@ impl Client { debug!("Encryption took: {:.2?}", now.elapsed()); let map_xor_name = *data_map_chunk.address().xorname(); + let mut xor_names = vec![map_xor_name]; - let mut uploader = Uploader::new(self.clone(), wallet.clone()); - uploader.insert_chunks(chunks); - uploader.insert_chunks(vec![data_map_chunk]); + for chunk in &chunks { + xor_names.push(*chunk.name()); + } - let summary = uploader.start_upload().await?; + // Pay for all chunks + data map chunk + info!("Paying for {} addresses", xor_names.len()); + let (payment_proofs, _free_chunks) = self + .pay(xor_names.into_iter(), wallet) + .await + .inspect_err(|err| error!("Error paying for data: {err:?}"))?; + + let mut record_count = 0; + + // Upload all the chunks in parallel including the data map chunk + debug!("Uploading {} chunks", chunks.len()); + let mut tasks = JoinSet::new(); + for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) { + let self_clone = self.clone(); + let address = *chunk.address(); + if let Some(proof) = payment_proofs.get(chunk.name()) { + let proof_clone = proof.clone(); + tasks.spawn(async move { + self_clone + .chunk_upload_with_payment(chunk, proof_clone) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) + }); + } else { + debug!("Chunk at {address:?} was already paid for so skipping"); + } + } + while let Some(result) = tasks.join_next().await { + result + .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) + .map_err(PutError::JoinError)? 
+ .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + record_count += 1; + } if let Some(channel) = self.client_event_sender.as_ref() { - if let Err(err) = channel - .send(ClientEvent::UploadComplete { - record_count: summary.uploaded_count, - tokens_spent: summary.storage_cost, - }) - .await - { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .sum::(); + + let summary = UploadSummary { + record_count, + tokens_spent, + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { error!("Failed to send client event: {err:?}"); } } + Ok(map_xor_name) } diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs index b1cda1b3f7..b6d0bfa8a3 100644 --- a/autonomi/src/client/data_private.rs +++ b/autonomi/src/client/data_private.rs @@ -6,15 +6,17 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::data::{GetError, PutError}; -use crate::client::ClientEvent; -use crate::uploader::Uploader; -use crate::{self_encryption::encrypt, Client}; +use std::hash::{DefaultHasher, Hash, Hasher}; + use bytes::Bytes; use serde::{Deserialize, Serialize}; -use sn_evm::EvmWallet; +use sn_evm::{Amount, EvmWallet}; use sn_protocol::storage::Chunk; -use std::hash::{DefaultHasher, Hash, Hasher}; +use tokio::task::JoinSet; + +use super::data::{GetError, PutError}; +use crate::client::{ClientEvent, UploadSummary}; +use crate::{self_encryption::encrypt, Client}; /// Private data on the network can be accessed with this #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)] @@ -67,21 +69,53 @@ impl Client { let (data_map_chunk, chunks) = encrypt(data)?; debug!("Encryption took: {:.2?}", now.elapsed()); - // Upload the chunks with the payments - let mut uploader = Uploader::new(self.clone(), wallet.clone()); - uploader.insert_chunks(chunks); - uploader.insert_chunks(vec![data_map_chunk.clone()]); + // Pay for all chunks + let xor_names: Vec<_> = chunks.iter().map(|chunk| *chunk.name()).collect(); + info!("Paying for {} addresses", xor_names.len()); + let (payment_proofs, _free_chunks) = self + .pay(xor_names.into_iter(), wallet) + .await + .inspect_err(|err| error!("Error paying for data: {err:?}"))?; - let summary = uploader.start_upload().await?; + // Upload the chunks with the payments + let mut record_count = 0; + debug!("Uploading {} chunks", chunks.len()); + let mut tasks = JoinSet::new(); + for chunk in chunks { + let self_clone = self.clone(); + let address = *chunk.address(); + if let Some(proof) = payment_proofs.get(chunk.name()) { + let proof_clone = proof.clone(); + tasks.spawn(async move { + self_clone + .chunk_upload_with_payment(chunk, proof_clone) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}")) + }); + } else { + debug!("Chunk at {address:?} was already paid for so skipping"); + } + } + while let Some(result) = tasks.join_next().await { + result + .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) + .map_err(PutError::JoinError)? 
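Both data.rs above and data_private.rs here restore the same fan-out shape: spawn each chunk upload onto a tokio JoinSet, then drain join_next, distinguishing join failures (panics or cancellation) from upload failures. A minimal sketch of that shape, with a hypothetical upload_one standing in for chunk_upload_with_payment:

use tokio::task::JoinSet;

#[derive(Debug)]
enum PutError {
    Join(tokio::task::JoinError),
    Upload(String),
}

// Hypothetical stand-in for the real per-chunk upload call.
async fn upload_one(id: u32) -> Result<(), String> {
    let _ = id;
    Ok(())
}

async fn upload_all(ids: Vec<u32>) -> Result<usize, PutError> {
    let mut tasks = JoinSet::new();
    for id in ids {
        tasks.spawn(async move { upload_one(id).await });
    }
    let mut uploaded = 0;
    // join_next yields Result<TaskOutput, JoinError>: the outer error means the
    // task panicked or was cancelled, the inner one is the upload failure itself.
    while let Some(result) = tasks.join_next().await {
        result.map_err(PutError::Join)?.map_err(PutError::Upload)?;
        uploaded += 1;
    }
    Ok(uploaded)
}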
+ .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + record_count += 1; + } + // Reporting if let Some(channel) = self.client_event_sender.as_ref() { - if let Err(err) = channel - .send(ClientEvent::UploadComplete { - record_count: summary.uploaded_count, - tokens_spent: summary.storage_cost, - }) - .await - { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .sum::(); + + let summary = UploadSummary { + record_count, + tokens_spent, + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { error!("Failed to send client event: {err:?}"); } } diff --git a/autonomi/src/client/external_signer.rs b/autonomi/src/client/external_signer.rs index 5057bc3b28..b17002bd9c 100644 --- a/autonomi/src/client/external_signer.rs +++ b/autonomi/src/client/external_signer.rs @@ -3,8 +3,7 @@ use crate::client::utils::extract_quote_payments; use crate::self_encryption::encrypt; use crate::Client; use bytes::Bytes; -use sn_evm::{ProofOfPayment, QuotePayment}; -use sn_networking::PayeeQuote; +use sn_evm::{PaymentQuote, ProofOfPayment, QuotePayment}; use sn_protocol::storage::Chunk; use std::collections::HashMap; use xor_name::XorName; @@ -34,7 +33,7 @@ impl Client { data: Bytes, ) -> Result< ( - HashMap, + HashMap, Vec, Vec, ), @@ -42,9 +41,15 @@ impl Client { > { // Encrypt the data as chunks let (_data_map_chunk, _chunks, xor_names) = encrypt_data(data)?; - let cost_map = self.get_store_quotes(xor_names.into_iter()).await?; - let (quote_payments, free_chunks) = extract_quote_payments(&cost_map); + let cost_map: HashMap = self + .get_store_quotes(xor_names.into_iter()) + .await? + .into_iter() + .map(|(name, (_, _, q))| (name, q)) + .collect(); + + let (quote_payments, free_chunks) = extract_quote_payments(&cost_map); Ok((cost_map, quote_payments, free_chunks)) } @@ -57,12 +62,12 @@ impl Client { if let Some(proof) = payment_proofs.get(map_xor_name) { debug!("Uploading data map chunk: {map_xor_name:?}"); - self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone(), None) + self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) .await - .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; + .inspect_err(|err| error!("Error uploading data map chunk: {err:?}")) + } else { + Ok(()) } - - Ok(()) } async fn upload_chunks( @@ -74,7 +79,7 @@ impl Client { for chunk in chunks { if let Some(proof) = payment_proofs.get(chunk.name()) { let address = *chunk.address(); - self.chunk_upload_with_payment(chunk.clone(), proof.clone(), None) + self.chunk_upload_with_payment(chunk.clone(), proof.clone()) .await .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; } diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index 43ab87f504..d7f243df68 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -20,7 +20,7 @@ use super::data::{DataAddr, GetError, PutError}; /// Errors that can occur during the file upload operation. #[cfg(feature = "fs")] #[derive(Debug, thiserror::Error)] -pub enum FileUploadError { +pub enum UploadError { #[error("Failed to recursively traverse directory")] WalkDir(#[from] walkdir::Error), #[error("Input/output failure")] @@ -38,7 +38,7 @@ pub enum FileUploadError { #[cfg(feature = "fs")] /// Errors that can occur during the download operation. 
#[derive(Debug, thiserror::Error)] -pub enum FileDownloadError { +pub enum DownloadError { #[error("Failed to download file")] GetError(#[from] GetError), #[error("IO failure")] @@ -67,7 +67,7 @@ impl Client { &self, data_addr: DataAddr, to_dest: PathBuf, - ) -> Result<(), FileDownloadError> { + ) -> Result<(), DownloadError> { let data = self.data_get(data_addr).await?; if let Some(parent) = to_dest.parent() { tokio::fs::create_dir_all(parent).await?; @@ -81,7 +81,7 @@ impl Client { &self, archive_addr: ArchiveAddr, to_dest: PathBuf, - ) -> Result<(), FileDownloadError> { + ) -> Result<(), DownloadError> { let archive = self.archive_get(archive_addr).await?; for (path, addr, _meta) in archive.iter() { self.file_download(*addr, to_dest.join(path)).await?; @@ -95,7 +95,7 @@ impl Client { &self, dir_path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let mut archive = Archive::new(); for entry in walkdir::WalkDir::new(dir_path) { @@ -129,7 +129,7 @@ impl Client { &self, path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let data = tokio::fs::read(path).await?; let data = Bytes::from(data); let addr = self.data_put(data, wallet).await?; diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs index 31f7857ec6..0d9b819d70 100644 --- a/autonomi/src/client/fs_private.rs +++ b/autonomi/src/client/fs_private.rs @@ -21,7 +21,7 @@ use std::path::PathBuf; use super::archive_private::{PrivateArchive, PrivateArchiveAccess}; use super::data_private::PrivateDataAccess; -use super::fs::{FileDownloadError, FileUploadError}; +use super::fs::{DownloadError, UploadError}; impl Client { /// Download a private file from network to local file system @@ -29,7 +29,7 @@ impl Client { &self, data_access: PrivateDataAccess, to_dest: PathBuf, - ) -> Result<(), FileDownloadError> { + ) -> Result<(), DownloadError> { let data = self.private_data_get(data_access).await?; if let Some(parent) = to_dest.parent() { tokio::fs::create_dir_all(parent).await?; @@ -43,7 +43,7 @@ impl Client { &self, archive_access: PrivateArchiveAccess, to_dest: PathBuf, - ) -> Result<(), FileDownloadError> { + ) -> Result<(), DownloadError> { let archive = self.private_archive_get(archive_access).await?; for (path, addr, _meta) in archive.iter() { self.private_file_download(addr.clone(), to_dest.join(path)) @@ -58,7 +58,7 @@ impl Client { &self, dir_path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let mut archive = PrivateArchive::new(); for entry in walkdir::WalkDir::new(dir_path) { @@ -92,7 +92,7 @@ impl Client { &self, path: PathBuf, wallet: &EvmWallet, - ) -> Result { + ) -> Result { let data = tokio::fs::read(path).await?; let data = Bytes::from(data); let addr = self.private_data_put(data, wallet).await?; diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 172fb9ba4f..d530f210f2 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -31,7 +31,7 @@ pub mod vault; pub mod wasm; // private module with utility functions -pub(crate) mod utils; +mod utils; pub use sn_evm::Amount; @@ -207,8 +207,12 @@ async fn handle_event_receiver( /// Events that can be broadcasted by the client. #[derive(Debug, Clone)] pub enum ClientEvent { - UploadComplete { - record_count: usize, - tokens_spent: Amount, - }, + UploadComplete(UploadSummary), +} + +/// Summary of an upload operation. 
+#[derive(Debug, Clone)] +pub struct UploadSummary { + pub record_count: usize, + pub tokens_spent: Amount, } diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index a17bffb147..52f8944e1e 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -6,30 +6,34 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::data::CostError; -use crate::client::{Client, ClientEvent}; -use crate::uploader::{UploadError, Uploader}; /// Register Secret Key pub use bls::SecretKey as RegisterSecretKey; -use bytes::Bytes; -use libp2p::kad::{Quorum, Record}; use sn_evm::Amount; use sn_evm::AttoTokens; -use sn_evm::EvmWallet; -use sn_evm::ProofOfPayment; +use sn_evm::EvmWalletError; use sn_networking::VerificationKind; +use sn_protocol::storage::RetryStrategy; +pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; + +use crate::client::data::PayError; +use crate::client::Client; +use crate::client::ClientEvent; +use crate::client::UploadSummary; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use sn_evm::EvmWallet; use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg}; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; -use sn_protocol::storage::RetryStrategy; use sn_protocol::NetworkAddress; use sn_registers::Register as BaseRegister; -pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister}; use std::collections::BTreeSet; use xor_name::XorName; +use super::data::CostError; + #[derive(Debug, thiserror::Error)] pub enum RegisterError { #[error("Cost error: {0}")] @@ -40,12 +44,16 @@ pub enum RegisterError { Serialization, #[error("Register could not be verified (corrupt)")] FailedVerification, - #[error("Upload Error")] - Upload(#[from] UploadError), + #[error("Payment failure occurred during register creation.")] + Pay(#[from] PayError), + #[error("Failed to retrieve wallet payment")] + Wallet(#[from] EvmWalletError), #[error("Failed to write to low-level register")] Write(#[source] sn_registers::Error), #[error("Failed to sign register")] CouldNotSign(#[source] sn_registers::Error), + #[error("Received invalid quote from node, this node is possibly malfunctioning, try another node by trying another register name")] + InvalidQuote, } #[derive(Clone, Debug)] @@ -111,45 +119,6 @@ impl Register { Ok(()) } - - /// Merge two registers together. 
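With the revert, ClientEvent::UploadComplete goes back to carrying an UploadSummary value rather than named fields, so consumers destructure the summary. A self-contained sketch of the consumer side, using simplified stand-in types (u128 in place of Amount):

use tokio::sync::mpsc;

#[derive(Debug, Clone)]
struct UploadSummary {
    record_count: usize,
    tokens_spent: u128,
}

#[derive(Debug, Clone)]
enum ClientEvent {
    UploadComplete(UploadSummary),
}

async fn log_events(mut rx: mpsc::Receiver<ClientEvent>) {
    while let Some(ClientEvent::UploadComplete(summary)) = rx.recv().await {
        println!(
            "uploaded {} records for {} attos",
            summary.record_count, summary.tokens_spent
        );
    }
}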
- pub(crate) fn merge(&mut self, other: &Self) -> Result<(), RegisterError> { - debug!("Merging Register of: {:?}", self.address()); - - other.signed_reg.verify().map_err(|_| { - error!( - "Failed to verify register at address: {:?}", - other.address() - ); - RegisterError::FailedVerification - })?; - - self.signed_reg.merge(&other.signed_reg).map_err(|err| { - error!("Failed to merge registers {}: {err}", self.address()); - RegisterError::Write(err) - })?; - - for op in other.signed_reg.ops() { - if let Err(err) = self.crdt_reg.apply_op(op.clone()) { - error!( - "Failed to apply {op:?} to Register {}: {err}", - self.address() - ); - return Err(RegisterError::Write(err)); - } - } - - Ok(()) - } - - #[cfg(test)] - pub(crate) fn test_new_from_register(signed_reg: SignedRegister) -> Register { - let crdt_reg = RegisterCrdt::new(*signed_reg.address()); - Register { - signed_reg, - crdt_reg, - } - } } impl Client { @@ -191,18 +160,13 @@ impl Client { }; // Make sure the fetched record contains valid CRDT operations - signed_reg.verify().map_err(|_| { - error!("Failed to verify register at address: {address}"); - RegisterError::FailedVerification - })?; + signed_reg + .verify() + .map_err(|_| RegisterError::FailedVerification)?; let mut crdt_reg = RegisterCrdt::new(*signed_reg.address()); for op in signed_reg.ops() { if let Err(err) = crdt_reg.apply_op(op.clone()) { - error!( - "Failed to apply {op:?} to Register {address}: {err}", - address = signed_reg.address() - ); return Err(RegisterError::Write(err)); } } @@ -222,6 +186,18 @@ impl Client { ) -> Result<(), RegisterError> { register.write_atop(&new_value, &owner)?; + let signed_register = register.signed_reg.clone(); + + // Prepare the record for network storage + let record = Record { + key: NetworkAddress::from_register_address(*register.address()).to_record_key(), + value: try_serialize_record(&signed_register, RecordKind::Register) + .map_err(|_| RegisterError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; + let get_cfg = GetRecordCfg { get_quorum: Quorum::Majority, retry_strategy: Some(RetryStrategy::default()), @@ -236,7 +212,16 @@ impl Client { verification: Some((VerificationKind::Network, get_cfg)), }; - self.register_upload(®ister, None, &put_cfg).await?; + // Store the updated register on the network + self.network + .put_record(record, &put_cfg) + .await + .inspect_err(|err| { + error!( + "Failed to put record - register {:?} to the network: {err}", + register.address() + ) + })?; Ok(()) } @@ -308,79 +293,74 @@ impl Client { // Owner can write to the register. let register = Register::new(Some(value), name, owner, permissions)?; - let address = *register.address(); - - let mut uploader = Uploader::new(self.clone(), wallet.clone()); - uploader.insert_register(vec![register]); - uploader.collect_registers(true); + let address = register.address(); - let summary = uploader.start_upload().await?; - - let register = summary - .uploaded_registers - .get(&address) - .ok_or_else(|| { - error!("Failed to get register with name: {name}"); - RegisterError::Upload(UploadError::InternalError) - })? 
- .clone(); - - if let Some(channel) = self.client_event_sender.as_ref() { - if let Err(err) = channel - .send(ClientEvent::UploadComplete { - record_count: summary.uploaded_count, - tokens_spent: summary.storage_cost, - }) - .await - { - error!("Failed to send client event: {err:?}"); - } - } + let reg_xor = address.xorname(); + debug!("Paying for register at address: {address}"); + let (payment_proofs, _skipped) = self + .pay(std::iter::once(reg_xor), wallet) + .await + .inspect_err(|err| { + error!("Failed to pay for register at address: {address} : {err}") + })?; + let proof = if let Some(proof) = payment_proofs.get(®_xor) { + proof + } else { + // register was skipped, meaning it was already paid for + error!("Register at address: {address} was already paid for"); + return Err(RegisterError::Network(NetworkError::RegisterAlreadyExists)); + }; - Ok(register) - } + let payee = proof + .to_peer_id_payee() + .ok_or(RegisterError::InvalidQuote) + .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?; + let signed_register = register.signed_reg.clone(); + + let record = Record { + key: NetworkAddress::from_register_address(*address).to_record_key(), + value: try_serialize_record( + &(proof, &signed_register), + RecordKind::RegisterWithPayment, + ) + .map_err(|_| RegisterError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; - // Used by the uploader. - pub(crate) async fn register_upload( - &self, - register: &Register, - payment: Option<&ProofOfPayment>, - put_cfg: &PutRecordCfg, - ) -> Result<(), RegisterError> { - let signed_register = ®ister.signed_reg; - let record = if let Some(proof) = payment { - Record { - key: NetworkAddress::from_register_address(*register.address()).to_record_key(), - value: try_serialize_record( - &(proof, signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - } else { - Record { - key: NetworkAddress::from_register_address(*register.address()).to_record_key(), - value: try_serialize_record(signed_register, RecordKind::Register) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::default()), + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; + let put_cfg = PutRecordCfg { + put_quorum: Quorum::All, + retry_strategy: None, + use_put_record_to: Some(vec![payee]), + verification: Some((VerificationKind::Network, get_cfg)), }; + debug!("Storing register at address {address} to the network"); self.network - .put_record(record, put_cfg) + .put_record(record, &put_cfg) .await .inspect_err(|err| { - error!( - "Failed to put record - register {:?} to the network: {err}", - register.address() - ) + error!("Failed to put record - register {address} to the network: {err}") })?; - Ok(()) + if let Some(channel) = self.client_event_sender.as_ref() { + let summary = UploadSummary { + record_count: 1, + tokens_spent: proof.quote.cost.as_atto(), + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + error!("Failed to send client event: {err}"); + } + } + + Ok(register) } } diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs index ec6515a78b..68ae70f2f7 100644 --- a/autonomi/src/client/utils.rs +++ b/autonomi/src/client/utils.rs @@ -6,28 +6,30 @@ // KIND, either express or implied. 
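The register_create path restored above pays the chosen node, then PUTs directly to that payee with network verification: the PutRecordCfg embeds a GetRecordCfg so the record is re-fetched and checked after storage. The pairing, extracted as a sketch (field shapes exactly as they appear in the diff above):

use libp2p::{kad::Quorum, PeerId};
use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind};
use sn_protocol::storage::RetryStrategy;

// Build the put config used for a register: PUT only to the paid node,
// then verify by re-fetching from the network with majority quorum.
fn register_put_cfg(payee: PeerId) -> PutRecordCfg {
    let get_cfg = GetRecordCfg {
        get_quorum: Quorum::Majority,
        retry_strategy: Some(RetryStrategy::default()),
        target_record: None,
        expected_holders: Default::default(),
        is_register: true,
    };
    PutRecordCfg {
        put_quorum: Quorum::All,
        retry_strategy: None,
        use_put_record_to: Some(vec![payee]),
        verification: Some((VerificationKind::Network, get_cfg)),
    }
}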
Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use super::{ - data::{CostError, GetError, PayError}, - Client, -}; -use crate::utils::payment_proof_from_quotes_and_payments; -use crate::{self_encryption::DataMapLevel, uploader::UploadError}; +use std::{collections::HashMap, num::NonZero}; + use bytes::Bytes; use libp2p::kad::{Quorum, Record}; use rand::{thread_rng, Rng}; use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_evm::{EvmWallet, ProofOfPayment, QuotePayment}; +use sn_evm::{EvmWallet, PaymentQuote, ProofOfPayment, QuotePayment}; use sn_networking::{ GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind, }; use sn_protocol::{ messages::ChunkProof, storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy}, - NetworkAddress, CLOSE_GROUP_SIZE, + NetworkAddress, }; -use std::{collections::HashMap, num::NonZero}; use xor_name::XorName; +use super::{ + data::{CostError, GetError, PayError, PutError}, + Client, +}; +use crate::self_encryption::DataMapLevel; +use crate::utils::payment_proof_from_quotes_and_payments; + impl Client { /// Fetch and decrypt all chunks in the data map. pub(crate) async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { @@ -87,8 +89,7 @@ impl Client { &self, chunk: Chunk, payment: ProofOfPayment, - cfg: Option, - ) -> Result<(), UploadError> { + ) -> Result<(), PutError> { let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID"); debug!("Storing chunk: {chunk:?} to {:?}", storing_node); @@ -100,7 +101,7 @@ impl Client { key: key.clone(), value: try_serialize_record(&(payment, chunk.clone()), record_kind) .map_err(|e| { - UploadError::Serialization(format!( + PutError::Serialization(format!( "Failed to serialize chunk with payment: {e:?}" )) })? @@ -109,41 +110,35 @@ impl Client { expires: None, }; - let put_cfg = if let Some(cfg) = cfg { - cfg - } else { - let verification = { - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N( - NonZero::new(CLOSE_GROUP_SIZE / 2).expect("2 is non-zero"), - ), - retry_strategy: Some(RetryStrategy::Quick), - target_record: None, - expected_holders: Default::default(), - is_register: false, - }; - - let random_nonce = thread_rng().gen::(); - let expected_proof = - ChunkProof::from_chunk(&chunk, random_nonce).map_err(|err| { - UploadError::Serialization(format!("Failed to obtain chunk proof: {err:?}")) - })?; - - Some(( - VerificationKind::ChunkProof { - expected_proof, - nonce: random_nonce, - }, - verification_cfg, - )) + let verification = { + let verification_cfg = GetRecordCfg { + get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")), + retry_strategy: Some(RetryStrategy::Quick), + target_record: None, + expected_holders: Default::default(), + is_register: false, }; - PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: Some(vec![storing_node]), - verification, - } + let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk) + .map_err(|e| PutError::Serialization(format!("Failed to serialize chunk: {e:?}")))? 
+ .to_vec(); + let random_nonce = thread_rng().gen::(); + let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); + + Some(( + VerificationKind::ChunkProof { + expected_proof, + nonce: random_nonce, + }, + verification_cfg, + )) + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::One, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: Some(vec![storing_node]), + verification, }; Ok(self.network.put_record(record, &put_cfg).await?) } @@ -154,7 +149,12 @@ impl Client { content_addrs: impl Iterator, wallet: &EvmWallet, ) -> Result<(HashMap, Vec), PayError> { - let cost_map = self.get_store_quotes(content_addrs).await?; + let cost_map = self + .get_store_quotes(content_addrs) + .await? + .into_iter() + .map(|(name, (_, _, q))| (name, q)) + .collect(); let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map); @@ -233,12 +233,12 @@ async fn fetch_store_quote( /// Form to be executed payments and already executed payments from a cost map. pub(crate) fn extract_quote_payments( - cost_map: &HashMap, + cost_map: &HashMap, ) -> (Vec, Vec) { let mut to_be_paid = vec![]; let mut already_paid = vec![]; - for (chunk_address, (_, _, quote)) in cost_map.iter() { + for (chunk_address, quote) in cost_map.iter() { if quote.cost.is_zero() { already_paid.push(*chunk_address); } else { diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 6a1476d900..c73bef1378 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -33,8 +33,6 @@ extern crate tracing; pub mod client; -pub mod uploader; - #[cfg(feature = "data")] mod self_encryption; mod utils; diff --git a/autonomi/src/uploader/mod.rs b/autonomi/src/uploader/mod.rs deleted file mode 100644 index cd5f7e4fc2..0000000000 --- a/autonomi/src/uploader/mod.rs +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -//! The uploader module provides an interface to upload data to the network, with the ability to retry failed uploads, -//! make repayments for failed payments, and verify the data after it has been uploaded. -//! -//! # Example -//! let mut uploader = Uploader::new(self.clone(), wallet.clone()); -//! uploader.insert_chunks(chunks); -//! uploader.insert_chunks(vec![data_map_chunk]); -//! let summary = uploader.start_upload().await?; -//! -//! # Configuration -//! The `Uploader` can be configured using the `UploadCfg` struct. The most notable options are the `batch_size` and -//! `payment_batch_size` which determine the number of data that are processed in parallel and the number of payments -//! that are made in a single evm transaction, respectively. -//! Also the `max_repayments_for_failed_data` option determines the maximum number of repayments to make if the -//! initial payment fails. 
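For reference while reviewing the deletion: the module doc above describes the knobs the removed uploader exposed, notably batch_size and payment_batch_size. A minimal sketch of how a caller configured and drove it, using only the API defined in this (now deleted) module:

use crate::uploader::{UploadCfg, UploadError, UploadSummary, Uploader};
use crate::Client;
use sn_evm::EvmWallet;
use sn_protocol::storage::{Chunk, RetryStrategy};

async fn upload_chunks(
    client: Client,
    wallet: EvmWallet,
    chunks: Vec<Chunk>,
) -> Result<UploadSummary, UploadError> {
    let mut uploader = Uploader::new(client, wallet);
    uploader.set_upload_cfg(UploadCfg {
        batch_size: 32,          // data items processed in parallel
        payment_batch_size: 256, // payments bundled into one EVM transaction
        retry_strategy: RetryStrategy::Quick,
        ..UploadCfg::default()
    });
    uploader.insert_chunks(chunks);
    uploader.start_upload().await
}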
- -#[cfg(test)] -mod tests; -mod upload; - -#[cfg(feature = "registers")] -use crate::client::registers::{Register, RegisterError}; -use crate::Client; -use itertools::Either; -use sn_evm::EvmWallet; -use sn_evm::{Amount, EvmNetworkTokenError, ProofOfPayment}; -use sn_networking::target_arch::{mpsc, mpsc_channel}; -use sn_networking::{NetworkError, PayeeQuote}; -#[cfg(feature = "data")] -use sn_protocol::storage::{Chunk, ChunkAddress}; -use sn_protocol::{storage::RetryStrategy, NetworkAddress}; -#[cfg(feature = "registers")] -use sn_registers::RegisterAddress; -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - path::PathBuf, -}; -use upload::InnerUploader; -use xor_name::XorName; - -/// The default batch size that determines the number of data that are processed in parallel. -/// This includes fetching the store cost, uploading and verifying the data. -/// Use PAYMENT_BATCH_SIZE to control the number of payments made in a single transaction. -pub const BATCH_SIZE: usize = 16; - -/// The number of payments to make in a single EVM transaction. -pub const PAYMENT_BATCH_SIZE: usize = 512; - -/// The number of repayments to attempt for a failed item before returning an error. -/// If value = 1, we do an initial payment & 1 repayment. Thus we make a max 2 payments per data item. -#[cfg(not(test))] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 3; -#[cfg(test)] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1; - -#[derive(Debug, thiserror::Error)] -pub enum UploadError { - #[error("Network Token error: {0:?}")] - EvmNetworkTokenError(#[from] EvmNetworkTokenError), - #[error("Internal Error")] - InternalError, - #[error("Invalid cfg: {0:?}")] - InvalidCfg(String), - #[error("I/O error: {0:?}")] - Io(#[from] std::io::Error), - #[error("The upload failed with maximum repayments reached for multiple items: {items:?}")] - MaximumRepaymentsReached { items: Vec }, - #[error("Network error: {0:?}")] - Network(#[from] NetworkError), - #[cfg(feature = "registers")] - #[error("Register could not be verified (corrupt)")] - RegisterFailedVerification, - #[cfg(feature = "registers")] - #[error("Failed to write to low-level register")] - RegisterWrite(#[source] sn_registers::Error), - #[cfg(feature = "registers")] - #[error("Failed to sign register")] - RegisterCouldNotSign(#[source] sn_registers::Error), - #[error("Multiple consecutive network errors reported during upload")] - SequentialNetworkErrors, - #[error("Too many sequential payment errors reported during upload")] - SequentialUploadPaymentError, - #[error("Failed to serialize {0}")] - Serialization(String), -} - -// UploadError is used inside RegisterError, but the uploader emits RegisterError. So this is used to avoid -// recursive enum definition. 
-#[cfg(feature = "registers")] -impl From for UploadError { - fn from(err: RegisterError) -> Self { - match err { - RegisterError::Network(err) => Self::Network(err), - RegisterError::Write(err) => Self::RegisterWrite(err), - RegisterError::CouldNotSign(err) => Self::RegisterCouldNotSign(err), - RegisterError::Cost(_) => Self::InternalError, - RegisterError::Serialization => Self::Serialization("Register".to_string()), - RegisterError::FailedVerification => Self::RegisterFailedVerification, - RegisterError::Upload(err) => err, - } - } -} - -/// The set of options to pass into the `Uploader` -#[derive(Debug, Clone, Copy)] -pub struct UploadCfg { - pub batch_size: usize, - pub payment_batch_size: usize, - pub verify_store: bool, - pub show_holders: bool, - pub retry_strategy: RetryStrategy, - pub max_repayments_for_failed_data: usize, - #[cfg(feature = "registers")] - pub collect_registers: bool, -} - -impl Default for UploadCfg { - fn default() -> Self { - Self { - batch_size: BATCH_SIZE, - payment_batch_size: PAYMENT_BATCH_SIZE, - verify_store: true, - show_holders: false, - retry_strategy: RetryStrategy::Balanced, - max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM, - #[cfg(feature = "registers")] - collect_registers: false, - } - } -} - -/// The result of a successful upload. -#[derive(Debug, Clone, Default)] -pub struct UploadSummary { - pub storage_cost: Amount, - pub final_balance: Amount, - pub uploaded_addresses: HashSet, - #[cfg(feature = "registers")] - pub uploaded_registers: HashMap, - /// The number of records that were paid for and uploaded to the network. - pub uploaded_count: usize, - /// The number of records that were skipped during because they were already present in the network. - pub skipped_count: usize, -} - -impl UploadSummary { - /// Merge two UploadSummary together. - pub fn merge(mut self, other: Self) -> Result> { - self.uploaded_addresses.extend(other.uploaded_addresses); - #[cfg(feature = "registers")] - self.uploaded_registers.extend(other.uploaded_registers); - - let summary = Self { - storage_cost: self - .storage_cost - .checked_add(other.storage_cost) - .ok_or_else(|| { - error!("Failed to merge UploadSummary: NumericOverflow"); - UploadError::InternalError - })?, - final_balance: self - .final_balance - .checked_add(other.storage_cost) - .ok_or_else(|| { - error!("Failed to merge UploadSummary: NumericOverflow"); - UploadError::InternalError - })?, - uploaded_addresses: self.uploaded_addresses, - #[cfg(feature = "registers")] - uploaded_registers: self.uploaded_registers, - uploaded_count: self.uploaded_count + other.uploaded_count, - skipped_count: self.skipped_count + other.skipped_count, - }; - Ok(summary) - } -} - -#[derive(Debug, Clone)] -/// The events emitted from the upload process. -pub enum UploadEvent { - /// Uploaded a record to the network. - #[cfg(feature = "data")] - ChunkUploaded(ChunkAddress), - /// Uploaded a Register to the network. - /// The returned register is just the passed in register. - #[cfg(feature = "registers")] - RegisterUploaded(Register), - /// The Chunk already exists in the network. No payments were made. - #[cfg(feature = "data")] - ChunkAlreadyExistsInNetwork(ChunkAddress), - /// The Register already exists in the network. The locally register changes were pushed to the network. - /// No payments were made. - /// The returned register contains the remote replica merged with the passed in register. 
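UploadSummary::merge above folds two summaries together with checked_add, so numeric overflow surfaces as an error instead of wrapping silently. One detail worth noting in passing: the deleted code adds other.storage_cost into final_balance, where the symmetric other.final_balance looks intended. A plain-u128 sketch of the checked merge:

#[derive(Debug, Default, Clone)]
struct Totals {
    storage_cost: u128,
    final_balance: u128,
    uploaded: usize,
    skipped: usize,
}

impl Totals {
    fn merge(self, other: Self) -> Result<Self, String> {
        Ok(Self {
            storage_cost: self
                .storage_cost
                .checked_add(other.storage_cost)
                .ok_or("storage cost overflow")?,
            // The deleted code summed other.storage_cost here as well;
            // adding the balances is the symmetric form.
            final_balance: self
                .final_balance
                .checked_add(other.final_balance)
                .ok_or("balance overflow")?,
            uploaded: self.uploaded + other.uploaded,
            skipped: self.skipped + other.skipped,
        })
    }
}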
- #[cfg(feature = "registers")] - RegisterUpdated(Register), - /// Payment for a batch of records has been made. - PaymentMade { tokens_spent: Amount }, - /// The upload process has terminated with an error. - // Note: We cannot send the Error enum as it does not implement Clone. So we cannot even do Result if - // we also want to return this error from the function. - Error, -} - -pub struct Uploader { - // Has to be stored as an Option as we have to take ownership of inner during the upload. - inner: Option, -} - -impl Uploader { - /// Start the upload process. - pub async fn start_upload(mut self) -> Result { - let event_sender = self - .inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .event_sender - .clone(); - match upload::start_upload(Box::new(self)).await { - Err(err) => { - if let Some(event_sender) = event_sender { - if let Err(err) = event_sender.send(UploadEvent::Error).await { - error!("Error while emitting event: {err:?}"); - } - } - Err(err) - } - Ok(summary) => Ok(summary), - } - } - - /// Creates a new instance of `Uploader` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - // NOTE: Self has to be constructed only using this method. We expect `Self::inner` is present everywhere. - pub fn new(client: Client, wallet: EvmWallet) -> Self { - Self { - inner: Some(InnerUploader::new(client, wallet)), - } - } - - /// Update all the configurations by passing the `UploadCfg` struct - pub fn set_upload_cfg(&mut self, cfg: UploadCfg) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_cfg(cfg); - } - - /// Sets the default batch size that determines the number of data that are processed in parallel. - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`. - pub fn set_batch_size(&mut self, batch_size: usize) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_batch_size(batch_size); - } - - /// Sets the default payment batch size that determines the number of payments that are made in a single - /// transaction. The maximum number of payments that can be made in a single transaction is 512. - /// - /// By default, this option is set to the constant `PAYMENT_BATCH_SIZE: usize = 512`. - pub fn set_payment_batch_size(&mut self, payment_batch_size: usize) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_payment_batch_size(payment_batch_size); - } - - /// Sets the option to verify the data after they have been uploaded. - /// - /// By default, this option is set to `true`. - pub fn set_verify_store(&mut self, verify_store: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_verify_store(verify_store); - } - - /// Sets the option to display the holders that are expected to be holding the data during verification. - /// - /// By default, this option is set to false. 
- pub fn set_show_holders(&mut self, show_holders: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_show_holders(show_holders); - } - - /// Sets the RetryStrategy to increase the re-try during the GetStoreCost & Upload tasks. - /// This does not affect the retries during the Payment task. Use `set_max_repayments_for_failed_data` to - /// configure the re-payment attempts. - /// - /// By default, this option is set to `RetryStrategy::Quick` - pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_retry_strategy(retry_strategy); - } - - /// Sets the maximum number of repayments to perform if the initial payment failed. - /// NOTE: This creates an extra Spend and uses the wallet funds. - /// - /// By default, this option is set to `3` repayments. - pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_max_repayments_for_failed_data(retries); - } - - /// Enables the uploader to return all the registers that were Uploaded or Updated. - /// The registers are emitted through the event channel whenever they're completed, but this returns them - /// through the UploadSummary when the whole upload process completes. - /// - /// By default, this option is set to `False` - #[cfg(feature = "registers")] - pub fn collect_registers(&mut self, collect_registers: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_collect_registers(collect_registers); - } - - /// Returns a receiver for UploadEvent. - /// This method is optional and the upload process can be performed without it. - pub fn get_event_receiver(&mut self) -> mpsc::Receiver { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .get_event_receiver() - } - - /// Insert a list of chunk paths into the uploader. - /// - /// Use `start_upload` to start the upload process. - #[cfg(feature = "fs")] - pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunk_paths(chunks); - } - - /// Insert a list of chunks into the uploader. - /// - /// Use `start_upload` to start the upload process. - #[cfg(feature = "data")] - pub fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunks(chunks); - } - - /// Insert a list of registers into the uploader. To get the updated registers, set `collect_registers` to true. - /// - /// Use `start_upload` to start the upload process. - #[cfg(feature = "registers")] - pub fn insert_register(&mut self, registers: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_register(registers); - } -} - -// ======= Private ======== - -/// An interface to make the testing easier by not interacting with the network. -trait UploaderInterface: Send + Sync { - fn take_inner_uploader(&mut self) -> InnerUploader; - - // Mutable reference is used in tests. 
- fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ); - - fn submit_push_register_task( - &mut self, - client: Client, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ); - - #[expect(clippy::too_many_arguments)] - fn submit_get_store_cost_task( - &mut self, - client: Client, - xorname: XorName, - address: NetworkAddress, - previous_payments: Option<&Vec>, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ); - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ); - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - previous_payments: Option<&Vec>, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ); -} - -// Configuration functions are used in tests. So these are defined here and re-used inside `Uploader` -impl InnerUploader { - pub(super) fn set_cfg(&mut self, cfg: UploadCfg) { - self.cfg = cfg; - } - - pub(super) fn set_batch_size(&mut self, batch_size: usize) { - self.cfg.batch_size = batch_size; - } - - pub(super) fn set_payment_batch_size(&mut self, payment_batch_size: usize) { - self.cfg.payment_batch_size = payment_batch_size; - } - - pub(super) fn set_verify_store(&mut self, verify_store: bool) { - self.cfg.verify_store = verify_store; - } - - pub(super) fn set_show_holders(&mut self, show_holders: bool) { - self.cfg.show_holders = show_holders; - } - - pub(super) fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.cfg.retry_strategy = retry_strategy; - } - - pub(super) fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.cfg.max_repayments_for_failed_data = retries; - } - - #[cfg(feature = "registers")] - pub(super) fn set_collect_registers(&mut self, collect_registers: bool) { - self.cfg.collect_registers = collect_registers; - } - - pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver { - let (tx, rx) = mpsc_channel(100); - self.event_sender = Some(tx); - rx - } - - #[cfg(feature = "fs")] - pub(super) fn insert_chunk_paths( - &mut self, - chunks: impl IntoIterator, - ) { - self.all_upload_items - .extend(chunks.into_iter().map(|(xorname, path)| { - let item = UploadItem::Chunk { - address: ChunkAddress::new(xorname), - chunk: Either::Right(path), - }; - (xorname, item) - })); - } - - #[cfg(feature = "data")] - pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.all_upload_items - .extend(chunks.into_iter().map(|chunk| { - let xorname = *chunk.name(); - let item = UploadItem::Chunk { - address: *chunk.address(), - chunk: Either::Left(chunk), - }; - (xorname, item) - })); - } - - #[cfg(feature = "registers")] - pub(super) fn insert_register(&mut self, registers: impl IntoIterator) { - self.all_upload_items - .extend(registers.into_iter().map(|reg| { - let address = *reg.address(); - let item = UploadItem::Register { address, reg }; - (address.xorname(), item) - })); - } -} - -#[derive(Debug, Clone)] -enum UploadItem { - #[cfg(feature = "data")] - Chunk { - address: ChunkAddress, - // Either the actual chunk or the path to the chunk. 
- chunk: Either, - }, - #[cfg(feature = "registers")] - Register { - address: RegisterAddress, - reg: Register, - }, -} - -impl UploadItem { - fn address(&self) -> NetworkAddress { - match self { - #[cfg(feature = "data")] - Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address), - #[cfg(feature = "registers")] - Self::Register { address, .. } => NetworkAddress::from_register_address(*address), - } - } - - fn xorname(&self) -> XorName { - match self { - #[cfg(feature = "data")] - UploadItem::Chunk { address, .. } => *address.xorname(), - #[cfg(feature = "registers")] - UploadItem::Register { address, .. } => address.xorname(), - } - } -} - -#[derive(Debug)] -enum TaskResult { - #[cfg(feature = "registers")] - GetRegisterFromNetworkOk { - remote_register: Register, - }, - #[cfg(feature = "registers")] - GetRegisterFromNetworkErr(XorName), - #[cfg(feature = "registers")] - PushRegisterOk { - updated_register: Register, - }, - PushRegisterErr(XorName), - GetStoreCostOk { - xorname: XorName, - quote: Box, - }, - GetStoreCostErr { - xorname: XorName, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_reached: bool, - }, - MakePaymentsOk { - payment_proofs: HashMap, - }, - MakePaymentsErr { - failed_xornames: Vec<(XorName, Box)>, - }, - UploadOk(XorName), - UploadErr { - xorname: XorName, - io_error: Option>, - }, -} - -#[derive(Debug, Clone)] -enum GetStoreCostStrategy { - /// Selects the PeerId with the lowest quote - Cheapest, - /// Selects the cheapest PeerId that we have not made payment to. - SelectDifferentPayee, -} diff --git a/autonomi/src/uploader/tests/mod.rs b/autonomi/src/uploader/tests/mod.rs deleted file mode 100644 index 80fb47f415..0000000000 --- a/autonomi/src/uploader/tests/mod.rs +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod setup; - -use crate::uploader::{ - tests::setup::{ - get_dummy_chunk_paths, get_dummy_registers, get_inner_uploader, start_uploading_with_steps, - TestSteps, - }, - UploadError, UploadEvent, -}; -use assert_matches::assert_matches; -use bls::SecretKey; -use eyre::Result; -use sn_logging::LogBuilder; -use std::collections::VecDeque; -use tempfile::tempdir; - -// ===== HAPPY PATH ======= - -/// 1. Chunk: if cost =0, then chunk is present in the network. 
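The deleted UploadItem above keeps a chunk either in memory or as a path on disk (via itertools' Either), so large uploads can defer reading file contents until the item is actually processed. A std-only sketch of the same shape, with hypothetical names:

use std::io;
use std::path::PathBuf;

// Either the chunk bytes already in memory, or a path to read them from lazily.
enum ChunkSource {
    InMemory(Vec<u8>),
    OnDisk(PathBuf),
}

impl ChunkSource {
    fn bytes(&self) -> io::Result<Vec<u8>> {
        match self {
            ChunkSource::InMemory(bytes) => Ok(bytes.clone()),
            ChunkSource::OnDisk(path) => std::fs::read(path),
        }
    }
}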
-#[tokio::test]
-async fn chunk_that_already_exists_in_the_network_should_return_zero_store_cost() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![TestSteps::GetStoreCostOk {
-        trigger_zero_cost: true,
-        assert_select_different_payee: false,
-    }];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 1);
-    assert_matches!(events[0], UploadEvent::ChunkAlreadyExistsInNetwork(_));
-    Ok(())
-}
-
-/// 2. Chunk: if cost != 0, then make payment and upload to the network.
-#[tokio::test]
-async fn chunk_should_be_paid_for_and_uploaded_if_cost_is_not_zero() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemOk,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 2);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[1], UploadEvent::ChunkUploaded(..));
-    Ok(())
-}
-
-/// 3. Register: if GET register = ok, then merge and push the register.
-#[tokio::test]
-async fn register_should_be_merged_and_pushed_if_it_already_exists_in_the_network() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    let steps = vec![TestSteps::GetRegisterOk, TestSteps::PushRegisterOk];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 1);
-    assert_matches!(events[0], UploadEvent::RegisterUpdated { .. });
-    Ok(())
-}
-
-/// 4. Register: if GET register = err, then get store cost and upload.
-#[tokio::test]
-async fn register_should_be_paid_and_uploaded_if_it_does_not_exists() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    // todo: what if cost = 0 even after GetRegister returns an error? check that
-    let steps = vec![
-        TestSteps::GetRegisterErr,
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemOk,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 2);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[1], UploadEvent::RegisterUploaded(..));
-    Ok(())
-}
-
-// ===== REPAYMENTS ======
-
-/// 1. Chunks: if the upload task fails > threshold, then get store cost should be triggered with SelectDifferentPayee
-/// and the item then uploaded.
-#[tokio::test]
-async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: true,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemOk,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 3);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[1], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[2], UploadEvent::ChunkUploaded(..));
-    Ok(())
-}
-
-/// 2. Register: if the upload task fails > threshold, then get store cost should be triggered with SelectDifferentPayee
-/// and the item then uploaded.
-#[tokio::test]
-async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetRegisterErr,
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: true,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemOk,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    let _stats = upload_handle.await??;
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 3);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[1], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[2], UploadEvent::RegisterUploaded(..));
-    Ok(())
-}
-
-// ===== ERRORS =======
-/// 1. Registers: Multiple PushRegisterErr should result in Error::SequentialNetworkErrors
-#[tokio::test]
-async fn register_upload_should_error_out_if_there_are_multiple_push_failures() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetRegisterOk,
-        TestSteps::PushRegisterErr,
-        TestSteps::PushRegisterErr,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::SequentialNetworkErrors)
-    );
-    let events = events_handle.await?;
-
-    // UploadEvent::Error is emitted by the caller of start_upload, so we can't check that one here.
-    assert_eq!(events.len(), 0);
-    Ok(())
-}
-
-/// 2. Chunk: Multiple errors during get store cost should result in Error::SequentialNetworkErrors
-#[tokio::test]
-async fn chunk_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetStoreCostErr {
-            assert_select_different_payee: false,
-        },
-        TestSteps::GetStoreCostErr {
-            assert_select_different_payee: false,
-        },
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::SequentialNetworkErrors)
-    );
-    let events = events_handle.await?;
-
-    // UploadEvent::Error is emitted by the caller of start_upload, so we can't check that one here.
-    assert_eq!(events.len(), 0);
-    Ok(())
-}
-
-/// 3. Register: Multiple errors during get store cost should result in Error::SequentialNetworkErrors
-#[tokio::test]
-async fn register_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()>
-{
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetRegisterErr,
-        TestSteps::GetStoreCostErr {
-            assert_select_different_payee: false,
-        },
-        TestSteps::GetStoreCostErr {
-            assert_select_different_payee: false,
-        },
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::SequentialNetworkErrors)
-    );
-    let events = events_handle.await?;
-
-    // UploadEvent::Error is emitted by the caller of start_upload, so we can't check that one here.
-    assert_eq!(events.len(), 0);
-    Ok(())
-}
-
-/// 4. Chunk: Multiple errors during make payment should result in Error::SequentialUploadPaymentError
-#[tokio::test]
-async fn chunk_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentErr,
-        TestSteps::MakePaymentErr,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::SequentialUploadPaymentError)
-    );
-    let events = events_handle.await?;
-
-    // UploadEvent::Error is emitted by the caller of start_upload, so we can't check that one here.
-    assert_eq!(events.len(), 0);
-    Ok(())
-}
-
-/// 5. Register: Multiple errors during make payment should result in Error::SequentialUploadPaymentError
-#[tokio::test]
-async fn register_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()>
-{
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-    let register_sk = SecretKey::random();
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_register(get_dummy_registers(1, &register_sk));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetRegisterErr,
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentErr,
-        TestSteps::MakePaymentErr,
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        register_sk,
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::SequentialUploadPaymentError)
-    );
-    let events = events_handle.await?;
-
-    // UploadEvent::Error is emitted by the caller of start_upload, so we can't check that one here.
-    assert_eq!(events.len(), 0);
-    Ok(())
-}
-
-// 6: Chunks + Registers: if the number of repayments exceeds a threshold, it should return a MaximumRepaymentsReached error.
-#[tokio::test]
-async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        // initial payment done
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::UploadItemErr { io_error: false },
-        // first repayment
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: true,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr { io_error: false },
-        TestSteps::UploadItemErr { io_error: false },
-        // thus after reaching max repayments, we should error out during get store cost.
-        TestSteps::GetStoreCostErr {
-            assert_select_different_payee: true,
-        },
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    assert_matches!(
-        upload_handle.await?,
-        Err(UploadError::MaximumRepaymentsReached { .. })
-    );
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 2);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    assert_matches!(events[1], UploadEvent::PaymentMade { .. });
-    Ok(())
-}
-
-// 7. If we get an IO error during upload, then the entire upload should error out.
-#[tokio::test]
-async fn io_error_during_upload_should_stop_the_uploads() -> Result<()> {
-    let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true);
-    let temp_dir = tempdir()?;
-    let (mut inner_uploader, task_result_rx) = get_inner_uploader()?;
-
-    // cfg
-    inner_uploader.set_batch_size(1);
-    inner_uploader.set_payment_batch_size(1);
-    inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf()));
-
-    // the path to test
-    let steps = vec![
-        TestSteps::GetStoreCostOk {
-            trigger_zero_cost: false,
-            assert_select_different_payee: false,
-        },
-        TestSteps::MakePaymentOk,
-        TestSteps::UploadItemErr { io_error: true },
-    ];
-
-    let (upload_handle, events_handle) = start_uploading_with_steps(
-        inner_uploader,
-        VecDeque::from(steps),
-        SecretKey::random(),
-        task_result_rx,
-    );
-
-    assert_matches!(upload_handle.await?, Err(UploadError::Io { .. }));
-    let events = events_handle.await?;
-
-    assert_eq!(events.len(), 1);
-    assert_matches!(events[0], UploadEvent::PaymentMade { .. });
-    Ok(())
-}
diff --git a/autonomi/src/uploader/tests/setup.rs b/autonomi/src/uploader/tests/setup.rs
deleted file mode 100644
index 98fe2128a7..0000000000
--- a/autonomi/src/uploader/tests/setup.rs
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
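// A minimal, self-contained sketch (not part of this crate) of the scripted-mock pattern the
// setup below uses: each submit_* call pops the next pre-programmed step and replies over a
// channel, so the upload loop under test never touches a real network. The names `Step`,
// `Outcome` and `ScriptedMock` here are illustrative assumptions, not crate APIs.
//
//     use std::{collections::VecDeque, sync::mpsc};
//
//     #[derive(Debug)]
//     enum Step { CostOk(u64), UploadOk }
//
//     #[derive(Debug, PartialEq)]
//     enum Outcome { Cost(u64), Uploaded }
//
//     struct ScriptedMock { steps: VecDeque<Step>, results: mpsc::Sender<Outcome> }
//
//     impl ScriptedMock {
//         // Analogue of the submit_* methods: pop the script, answer on the channel.
//         fn submit(&mut self) {
//             match self.steps.pop_front().expect("script exhausted") {
//                 Step::CostOk(c) => self.results.send(Outcome::Cost(c)).unwrap(),
//                 Step::UploadOk => self.results.send(Outcome::Uploaded).unwrap(),
//             }
//         }
//     }
//
//     fn main() {
//         let (tx, rx) = mpsc::channel();
//         let mut mock = ScriptedMock {
//             steps: VecDeque::from(vec![Step::CostOk(1), Step::UploadOk]),
//             results: tx,
//         };
//         mock.submit();
//         assert_eq!(rx.recv().unwrap(), Outcome::Cost(1));
//         mock.submit();
//         assert_eq!(rx.recv().unwrap(), Outcome::Uploaded);
//     }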
-
-use crate::{
-    client::registers::Register,
-    uploader::{
-        upload::{start_upload, InnerUploader},
-        GetStoreCostStrategy, TaskResult, UploadError, UploadEvent, UploadItem, UploadSummary,
-        UploaderInterface,
-    },
-    Client,
-};
-use alloy::{primitives::TxHash, signers::local::PrivateKeySigner};
-use assert_matches::assert_matches;
-use bls::SecretKey as BlsSecretKey;
-use eyre::Result;
-use libp2p::{identity::Keypair, PeerId};
-use rand::thread_rng;
-use sn_evm::{EvmNetwork, EvmWallet, PaymentQuote, ProofOfPayment};
-use sn_networking::{NetworkBuilder, PayeeQuote};
-use sn_protocol::{storage::RetryStrategy, NetworkAddress};
-use sn_registers::{RegisterAddress, SignedRegister};
-use std::{
-    collections::{BTreeMap, HashMap, VecDeque},
-    path::PathBuf,
-    sync::Arc,
-};
-use tokio::{runtime::Handle, sync::mpsc, task::JoinHandle};
-use xor_name::XorName;
-
-struct TestUploader {
-    inner: Option<InnerUploader>,
-    test_steps: VecDeque<TestSteps>,
-    task_result_sender: mpsc::Sender<TaskResult>,
-
-    // test states
-    make_payment_collector: Vec<(XorName, Box<PayeeQuote>)>,
-    payments_made_per_xorname: BTreeMap<XorName, usize>,
-    payment_batch_size: usize,
-    register_sk: BlsSecretKey,
-}
-
-impl UploaderInterface for TestUploader {
-    fn take_inner_uploader(&mut self) -> InnerUploader {
-        self.inner.take().unwrap()
-    }
-
-    fn submit_get_register_task(
-        &mut self,
-        _client: Client,
-        reg_addr: RegisterAddress,
-        _task_result_sender: mpsc::Sender<TaskResult>,
-    ) {
-        let xorname = reg_addr.xorname();
-        let step = self
-            .test_steps
-            .pop_front()
-            .expect("TestSteps are empty. Expected a GetRegister step.");
-        let handle = Handle::current();
-        let register_sk = self.register_sk.clone();
-        let task_result_sender = self.task_result_sender.clone();
-
-        println!("spawn_get_register called for: {xorname:?}. Step to execute: {step:?}");
-        info!("TEST: spawn_get_register called for: {xorname:?}. Step to execute: {step:?}");
-        match step {
-            TestSteps::GetRegisterOk => {
-                handle.spawn(async move {
-                    let remote_register =
-                        SignedRegister::test_new_from_address(reg_addr, &register_sk);
-                    let remote_register = Register::test_new_from_register(remote_register);
-                    task_result_sender
-                        .send(TaskResult::GetRegisterFromNetworkOk { remote_register })
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            TestSteps::GetRegisterErr => {
-                handle.spawn(async move {
-                    task_result_sender
-                        .send(TaskResult::GetRegisterFromNetworkErr(xorname))
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            con => panic!("Test failed: Expected GetRegister step. Got: {con:?}"),
-        }
-    }
-
-    fn submit_push_register_task(
-        &mut self,
-        _client: Client,
-        upload_item: UploadItem,
-        _verify_store: bool,
-        _task_result_sender: mpsc::Sender<TaskResult>,
-    ) {
-        let xorname = upload_item.xorname();
-        let step = self
-            .test_steps
-            .pop_front()
-            .expect("TestSteps are empty. Expected a PushRegister step.");
-        let handle = Handle::current();
-        let task_result_sender = self.task_result_sender.clone();
-
-        println!("spawn_push_register called for: {xorname:?}. Step to execute: {step:?}");
-        info!("TEST: spawn_push_register called for: {xorname:?}. Step to execute: {step:?}");
-        match step {
-            TestSteps::PushRegisterOk => {
-                handle.spawn(async move {
-                    let updated_register = match upload_item {
-                        UploadItem::Register { reg, .. } => reg,
-                        _ => panic!("Expected UploadItem::Register"),
-                    };
-                    task_result_sender
-                        .send(TaskResult::PushRegisterOk {
-                            // this register is just used for returning.
-                            updated_register,
-                        })
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            TestSteps::PushRegisterErr => {
-                handle.spawn(async move {
-                    task_result_sender
-                        .send(TaskResult::PushRegisterErr(xorname))
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            con => panic!("Test failed: Expected PushRegister step. Got: {con:?}"),
-        }
-    }
-
-    fn submit_get_store_cost_task(
-        &mut self,
-        _client: Client,
-        xorname: XorName,
-        _address: NetworkAddress,
-        _previous_payments: Option<&Vec<ProofOfPayment>>,
-        get_store_cost_strategy: GetStoreCostStrategy,
-        max_repayments_for_failed_data: usize,
-        _task_result_sender: mpsc::Sender<TaskResult>,
-    ) {
-        let step = self
-            .test_steps
-            .pop_front()
-            .expect("TestSteps are empty. Expected a GetStoreCost step.");
-        let handle = Handle::current();
-        let task_result_sender = self.task_result_sender.clone();
-
-        println!("spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}");
-        info!("TEST: spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}");
-
-        let has_max_payments_reached_closure =
-            |get_store_cost_strategy: &GetStoreCostStrategy| -> bool {
-                match get_store_cost_strategy {
-                    GetStoreCostStrategy::SelectDifferentPayee => {
-                        if let Some(n_payments) = self.payments_made_per_xorname.get(&xorname) {
-                            InnerUploader::have_we_reached_max_repayments(
-                                *n_payments,
-                                max_repayments_for_failed_data,
-                            )
-                        } else {
-                            false
-                        }
-                    }
-                    _ => false,
-                }
-            };
-
-        // if select different payee, then it can possibly error out if max_repayments have been reached.
-        // then the step should've been a GetStoreCostErr.
-        if has_max_payments_reached_closure(&get_store_cost_strategy) {
-            assert_matches!(step, TestSteps::GetStoreCostErr { .. }, "Max repayments have been reached, so we expect a GetStoreCostErr, not GetStoreCostOk");
-        }
-
-        match step {
-            TestSteps::GetStoreCostOk {
-                trigger_zero_cost,
-                assert_select_different_payee,
-            } => {
-                // Make sure that the received strategy is the one defined in the step.
-                assert!(match get_store_cost_strategy {
-                    // match here to not miss out on any new strategies.
-                    GetStoreCostStrategy::Cheapest => !assert_select_different_payee,
-                    GetStoreCostStrategy::SelectDifferentPayee { .. } =>
-                        assert_select_different_payee,
-                });
-
-                let mut quote = PaymentQuote::zero();
-                if !trigger_zero_cost {
-                    quote.cost = 1.into();
-                }
-                handle.spawn(async move {
-                    task_result_sender
-                        .send(TaskResult::GetStoreCostOk {
-                            xorname,
-                            quote: Box::new((
-                                PeerId::random(),
-                                PrivateKeySigner::random().address(),
-                                quote,
-                            )),
-                        })
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            TestSteps::GetStoreCostErr {
-                assert_select_different_payee,
-            } => {
-                // Make sure that the received strategy is the one defined in the step.
-                assert!(match get_store_cost_strategy {
-                    // match here to not miss out on any new strategies.
-                    GetStoreCostStrategy::Cheapest => !assert_select_different_payee,
-                    GetStoreCostStrategy::SelectDifferentPayee { .. } =>
-                        assert_select_different_payee,
-                });
-                let max_repayments_reached =
-                    has_max_payments_reached_closure(&get_store_cost_strategy);
-
-                handle.spawn(async move {
-                    task_result_sender
-                        .send(TaskResult::GetStoreCostErr {
-                            xorname,
-                            get_store_cost_strategy,
-                            max_repayments_reached,
-                        })
-                        .await
-                        .expect("Failed to send task result");
-                });
-            }
-            con => panic!("Test failed: Expected GetStoreCost step. Got: {con:?}"),
Got: {con:?}"), - } - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - _make_payment_sender: mpsc::Sender)>>, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a MakePayment step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - match &to_send { - Some((upload_item, quote)) => { - let xorname = upload_item.xorname(); - println!("spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}"); - info!( - "TEST: spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}" - ); - - self.make_payment_collector - .push((upload_item.xorname(), quote.clone())); - } - None => { - println!( - "spawn_make_payment called with force make payment. Step to execute: {step:?}" - ); - info!("TEST: spawn_make_payment called with force make payment. Step to execute: {step:?}"); - } - } - - // gotta collect batch size before sending task result. - let _make_payment = self.make_payment_collector.len() >= self.payment_batch_size - || (to_send.is_none() && !self.make_payment_collector.is_empty()); - - match step { - // TestSteps::MakePaymentJustCollectItem => { - // // The test expected for us to just collect item, but if the logic wants us to make payment, then it as - // // error - // assert!(!make_payment); - // } - TestSteps::MakePaymentOk => { - let payment_proofs = std::mem::take(&mut self.make_payment_collector) - .into_iter() - .map(|(xorname, _)| { - ( - xorname, - ProofOfPayment { - quote: PaymentQuote::zero(), - tx_hash: TxHash::repeat_byte(0), - }, - ) - }) - .collect::>(); - // track the payments per xorname - for xorname in payment_proofs.keys() { - let entry = self.payments_made_per_xorname.entry(*xorname).or_insert(0); - *entry += 1; - } - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsOk { payment_proofs }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::MakePaymentErr => { - let failed_xornames = std::mem::take(&mut self.make_payment_collector); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsErr { failed_xornames }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected MakePayment step. Got: {con:?}"), - } - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - _client: Client, - _previous_payments: Option<&Vec>, - _verify_store: bool, - _retry_strategy: RetryStrategy, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a UploadItem step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_upload_item called for: {xorname:?}. 
Step to execute: {step:?}"); - match step { - TestSteps::UploadItemOk => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadOk(xorname)) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::UploadItemErr { io_error } => { - handle.spawn(async move { - let io_error = if io_error { - Some(Box::new(std::io::Error::new( - std::io::ErrorKind::Other, - "Test IO Error", - ))) - } else { - None - }; - task_result_sender - .send(TaskResult::UploadErr { xorname, io_error }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected UploadItem step. Got: {con:?}"), - } - } -} - -#[derive(Debug, Clone)] -pub enum TestSteps { - GetRegisterOk, - GetRegisterErr, - PushRegisterOk, - PushRegisterErr, - GetStoreCostOk { - trigger_zero_cost: bool, - assert_select_different_payee: bool, - }, - GetStoreCostErr { - assert_select_different_payee: bool, - }, - // MakePaymentJustCollectItem, - MakePaymentOk, - MakePaymentErr, - UploadItemOk, - UploadItemErr { - io_error: bool, - }, -} - -pub fn get_inner_uploader() -> Result<(InnerUploader, mpsc::Sender)> { - let client = build_unconnected_client()?; - - let mut inner = InnerUploader::new( - client, - EvmWallet::new_with_random_wallet(EvmNetwork::new_custom( - "http://localhost:63319/", - "0x5FbDB2315678afecb367f032d93F642f64180aa3", - "0x8464135c8F25Da09e49BC8782676a84730C318bC", - )), - ); - let (task_result_sender, task_result_receiver) = mpsc::channel(100); - inner.testing_task_channels = Some((task_result_sender.clone(), task_result_receiver)); - - Ok((inner, task_result_sender)) -} - -// Spawns two tasks. One is the actual upload task that will return an UploadStat when completed. -// The other is a one to collect all the UploadEvent emitted by the previous task. -pub fn start_uploading_with_steps( - mut inner_uploader: InnerUploader, - test_steps: VecDeque, - register_sk: BlsSecretKey, - task_result_sender: mpsc::Sender, -) -> ( - JoinHandle>, - JoinHandle>, -) { - let payment_batch_size = inner_uploader.cfg.payment_batch_size; - let mut upload_event_rx = inner_uploader.get_event_receiver(); - - let upload_handle = tokio::spawn(start_upload(Box::new(TestUploader { - inner: Some(inner_uploader), - test_steps, - task_result_sender, - make_payment_collector: Default::default(), - payments_made_per_xorname: Default::default(), - payment_batch_size, - register_sk, - }))); - - let event_handle = tokio::spawn(async move { - let mut events = vec![]; - while let Some(event) = upload_event_rx.recv().await { - events.push(event); - } - events - }); - - (upload_handle, event_handle) -} - -// Collect all the upload events into a list - -// Build a very simple client struct for testing. This does not connect to any network. -// The UploaderInterface eliminates the need for direct networking in tests. -pub fn build_unconnected_client() -> Result { - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), true); - let (network, ..) = network_builder.build_client()?; - let client = Client { - network, - client_event_sender: Arc::new(None), - }; - Ok(client) -} - -// We don't perform any networking, so the paths can be dummy ones. 
-pub fn get_dummy_chunk_paths(num: usize, temp_dir: PathBuf) -> Vec<(XorName, PathBuf)> {
-    let mut rng = thread_rng();
-    let mut chunks = Vec::with_capacity(num);
-    for _ in 0..num {
-        chunks.push((XorName::random(&mut rng), temp_dir.clone()));
-    }
-    chunks
-}
-
-pub fn get_dummy_registers(num: usize, register_sk: &BlsSecretKey) -> Vec<Register> {
-    let mut rng = thread_rng();
-    let mut registers = Vec::with_capacity(num);
-    for _ in 0..num {
-        // test_new_from_address that is used during get_register
-        // uses AnyoneCanWrite permission, so use the same here
-        let address = RegisterAddress::new(XorName::random(&mut rng), register_sk.public_key());
-        let base_register = SignedRegister::test_new_from_address(address, register_sk);
-        let register = Register::test_new_from_register(base_register);
-        registers.push(register);
-    }
-    registers
-}
diff --git a/autonomi/src/uploader/upload.rs b/autonomi/src/uploader/upload.rs
deleted file mode 100644
index dac748549c..0000000000
--- a/autonomi/src/uploader/upload.rs
+++ /dev/null
@@ -1,1232 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use super::{
-    GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary, Uploader,
-    UploaderInterface, PAYMENT_BATCH_SIZE,
-};
-#[cfg(feature = "registers")]
-use crate::client::registers::Register;
-use crate::{uploader::UploadError, utils::payment_proof_from_quotes_and_payments, Client};
-use bytes::Bytes;
-use itertools::Either;
-use libp2p::{kad::Quorum, PeerId};
-use rand::{thread_rng, Rng};
-use sn_evm::{Amount, EvmWallet, ProofOfPayment};
-use sn_networking::target_arch::{mpsc, mpsc_channel, mpsc_recv, spawn};
-use sn_networking::{GetRecordCfg, PayeeQuote, PutRecordCfg, VerificationKind};
-#[cfg(feature = "data")]
-use sn_protocol::{messages::ChunkProof, storage::Chunk};
-use sn_protocol::{storage::RetryStrategy, NetworkAddress};
-#[cfg(feature = "registers")]
-use sn_registers::RegisterAddress;
-use std::{
-    collections::{HashMap, HashSet},
-    num::NonZero,
-};
-use xor_name::XorName;
-
-/// The maximum number of sequential payment failures before aborting the upload process.
-#[cfg(not(test))]
-const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 3;
-#[cfg(test)]
-const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 1;
-
-/// The maximum number of sequential network failures before aborting the upload process.
-// todo: use uploader.retry_strategy.get_count() instead.
-#[cfg(not(test))]
-const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 32;
-#[cfg(test)]
-const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 1;
-
-/// The number of upload failures for a single data item before selecting a different payee.
-#[cfg(not(test))]
-const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 3;
-#[cfg(test)]
-const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1;
-
-type Result<T> = std::result::Result<T, UploadError>;
-
-// TODO:
-// 1. track each batch with an id
-// 2. create an irrecoverable error type, so we can bail on io/serialization etc.
-// 3. separate cfgs/retries for register/chunk etc.
-// 4. log whenever we insert/remove items, i.e., don't ignore values with `let _`
-
-/// The main loop that performs the upload process.
-/// An interface is passed here for easy testing.
-pub(super) async fn start_upload(
-    mut interface: Box<dyn UploaderInterface>,
-) -> Result<UploadSummary> {
-    let mut uploader = interface.take_inner_uploader();
-
-    uploader.validate_upload_cfg()?;
-
-    // Take out the testing task senders if any. This is only set for tests.
-    let (task_result_sender, mut task_result_receiver) =
-        if let Some(channels) = uploader.testing_task_channels.take() {
-            channels
-        } else {
-            // 6 because of the 6 pipelines, 1 for redundancy.
-            mpsc_channel(uploader.cfg.batch_size * 6 + 1)
-        };
-    let (make_payment_sender, make_payment_receiver) = mpsc_channel(uploader.cfg.batch_size);
-
-    uploader.start_payment_processing_thread(
-        make_payment_receiver,
-        task_result_sender.clone(),
-        uploader.cfg.payment_batch_size,
-    )?;
-
-    // chunks can be pushed to pending_get_store_cost directly
-    #[cfg(feature = "data")]
-    {
-        uploader.pending_to_get_store_cost = uploader
-            .all_upload_items
-            .iter()
-            .filter_map(|(xorname, item)| {
-                if let UploadItem::Chunk { .. } = item {
-                    Some((*xorname, GetStoreCostStrategy::Cheapest))
-                } else {
-                    None
-                }
-            })
-            .collect();
-    }
-
-    // registers have to be verified + merged with the remote replica, so we have to fetch them first.
-    #[cfg(feature = "registers")]
-    {
-        uploader.pending_to_get_register = uploader
-            .all_upload_items
-            .iter()
-            .filter_map(|(_xorname, item)| {
-                if let UploadItem::Register { address, .. } = item {
-                    Some(*address)
-                } else {
-                    None
-                }
-            })
-            .collect();
-    }
-
-    loop {
-        // Break if we have uploaded all the items.
-        // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times.
-        if uploader.all_upload_items.is_empty() {
-            debug!("Upload items are empty, exiting main upload loop.");
-
-            // To avoid empty final_balance when all items are skipped. Skip for tests.
-            #[cfg(not(test))]
-            {
-                uploader.upload_final_balance = uploader
-                    .wallet
-                    .balance_of_tokens()
-                    .await
-                    .inspect_err(|err| {
-                        error!("Failed to get wallet balance: {err:?}");
-                    })?;
-            }
-
-            debug!("UPLOADER STATE: finished uploading all items {uploader:?}");
-            let summary = UploadSummary {
-                storage_cost: uploader.tokens_spent,
-                final_balance: uploader.upload_final_balance,
-                uploaded_addresses: uploader.uploaded_addresses,
-                uploaded_count: uploader.uploaded_count,
-                skipped_count: uploader.skipped_count,
-                uploaded_registers: uploader.uploaded_registers,
-            };
-
-            if !uploader.max_repayments_reached.is_empty() {
-                error!(
-                    "The maximum repayments were reached for these addresses: {:?}",
-                    uploader.max_repayments_reached
-                );
-                return Err(UploadError::MaximumRepaymentsReached {
-                    items: uploader.max_repayments_reached.into_iter().collect(),
-                });
-            }
-
-            return Ok(summary);
-        }
-
-        #[cfg(feature = "registers")]
-        {
-            // try to GET register if we have enough buffer.
-            // The results of the get & push register steps are used to fill up `pending_to_get_store_cost`.
-            // Since the get store cost list is the init state, we don't have to check if it is not full.
-            while !uploader.pending_to_get_register.is_empty()
-                && uploader.on_going_get_register.len() < uploader.cfg.batch_size
-            {
-                if let Some(reg_addr) = uploader.pending_to_get_register.pop() {
-                    trace!("Conditions met for GET registers {:?}", reg_addr.xorname());
-                    let _ = uploader.on_going_get_register.insert(reg_addr.xorname());
-                    interface.submit_get_register_task(
-                        uploader.client.clone(),
-                        reg_addr,
-                        task_result_sender.clone(),
-                    );
-                }
-            }
-
-            // try to push register if we have enough buffer.
-            // No other checks for the same reason as the above step.
-            while !uploader.pending_to_push_register.is_empty()
-                && uploader.on_going_get_register.len() < uploader.cfg.batch_size
-            {
-                let upload_item = uploader.pop_item_for_push_register()?;
-                trace!(
-                    "Conditions met for push registers {:?}",
-                    upload_item.xorname()
-                );
-                let _ = uploader
-                    .on_going_push_register
-                    .insert(upload_item.xorname());
-                interface.submit_push_register_task(
-                    uploader.client.clone(),
-                    upload_item,
-                    uploader.cfg.verify_store,
-                    task_result_sender.clone(),
-                );
-            }
-        }
-
-        // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer.
-        while !uploader.pending_to_get_store_cost.is_empty()
-            && uploader.on_going_get_cost.len() < uploader.cfg.batch_size
-            && uploader.pending_to_pay.len() < uploader.cfg.payment_batch_size
-        {
-            let (xorname, address, get_store_cost_strategy) =
-                uploader.pop_item_for_get_store_cost()?;
-            trace!("Conditions met for get store cost. {xorname:?} {get_store_cost_strategy:?}",);
-
-            let _ = uploader.on_going_get_cost.insert(xorname);
-            interface.submit_get_store_cost_task(
-                uploader.client.clone(),
-                xorname,
-                address,
-                uploader.payment_proofs.get(&xorname),
-                get_store_cost_strategy,
-                uploader.cfg.max_repayments_for_failed_data,
-                task_result_sender.clone(),
-            );
-        }
-
-        // try to make payment for an item if pending_to_upload needs items & if we have enough buffer.
-        while !uploader.pending_to_pay.is_empty()
-            && uploader.on_going_payments.len() < uploader.cfg.payment_batch_size
-            && uploader.pending_to_upload.len() < uploader.cfg.batch_size
-        {
-            let (upload_item, quote) = uploader.pop_item_for_make_payment()?;
-            trace!(
-                "Conditions met for making payments. {:?} {quote:?}",
-                upload_item.xorname()
-            );
-            let _ = uploader.on_going_payments.insert(upload_item.xorname());
-
-            interface
-                .submit_make_payment_task(Some((upload_item, quote)), make_payment_sender.clone());
-        }
-
-        // try to upload if we have enough buffer to upload.
-        while !uploader.pending_to_upload.is_empty()
-            && uploader.on_going_uploads.len() < uploader.cfg.batch_size
-        {
-            #[cfg(test)]
-            trace!("UPLOADER STATE: upload_item : {uploader:?}");
-            let upload_item = uploader.pop_item_for_upload_item()?;
-            let xorname = upload_item.xorname();
-
-            trace!("Conditions met for uploading. {xorname:?}");
-            let _ = uploader.on_going_uploads.insert(xorname);
-            interface.submit_upload_item_task(
-                upload_item,
-                uploader.client.clone(),
-                uploader.payment_proofs.get(&xorname),
-                uploader.cfg.verify_store,
-                uploader.cfg.retry_strategy,
-                task_result_sender.clone(),
-            );
-        }
-
-        // Fire None to trigger a forced round of making leftover payments, if there are not enough store cost tasks
-        // to fill up the buffer.
-        if uploader.pending_to_get_store_cost.is_empty()
-            && uploader.on_going_get_cost.is_empty()
-            && !uploader.on_going_payments.is_empty()
-            && uploader.on_going_payments.len() < uploader.cfg.payment_batch_size
-        {
-            #[cfg(test)]
-            trace!("UPLOADER STATE: make_payment (forced): {uploader:?}");
-
-            debug!("There are not enough on-going payments to trigger a batch payment and no get_store_costs to fill the batch. Triggering a forced round of payment");
-            interface.submit_make_payment_task(None, make_payment_sender.clone());
-        }
-
-        #[cfg(test)]
-        trace!("UPLOADER STATE: before await task result: {uploader:?}");
-
-        trace!("Fetching task result");
-        let task_result = mpsc_recv(&mut task_result_receiver)
-            .await
-            .ok_or(UploadError::InternalError)?;
-        trace!("Received task result: {task_result:?}");
-        match task_result {
-            #[cfg(feature = "registers")]
-            TaskResult::GetRegisterFromNetworkOk { remote_register } => {
-                // if we got back the register, then merge & PUT it.
-                let xorname = remote_register.address().xorname();
-                trace!("TaskResult::GetRegisterFromNetworkOk for remote register: {xorname:?} \n{remote_register:?}");
-                let _ = uploader.on_going_get_register.remove(&xorname);
-
-                let reg = uploader.all_upload_items.get_mut(&xorname).ok_or_else(|| {
-                    error!("Register {xorname:?} not found in all_upload_items.");
-                    UploadError::InternalError
-                })?;
-                if let UploadItem::Register { reg, .. } = reg {
-                    reg.merge(&remote_register).inspect_err(|err| {
-                        error!("Uploader failed to merge remote register: {err:?}");
-                    })?;
-                    uploader.pending_to_push_register.push(xorname);
-                }
-            }
-            #[cfg(feature = "registers")]
-            TaskResult::GetRegisterFromNetworkErr(xorname) => {
-                // then the register is a new one. It can follow the same flow as chunks now.
-                let _ = uploader.on_going_get_register.remove(&xorname);
-
-                uploader
-                    .pending_to_get_store_cost
-                    .push((xorname, GetStoreCostStrategy::Cheapest));
-            }
-            #[cfg(feature = "registers")]
-            TaskResult::PushRegisterOk { updated_register } => {
-                // push modifies the register, so we return this instead of the one from all_upload_items
-                let xorname = updated_register.address().xorname();
-                let _ = uploader.on_going_push_register.remove(&xorname);
-                uploader.skipped_count += 1;
-                let _ = uploader
-                    .uploaded_addresses
-                    .insert(NetworkAddress::from_register_address(
-                        *updated_register.address(),
-                    ));
-
-                let _old_register =
-                    uploader.all_upload_items.remove(&xorname).ok_or_else(|| {
-                        error!("Register {xorname:?} not found in all_upload_items");
-                        UploadError::InternalError
-                    })?;
-
-                if uploader.cfg.collect_registers {
-                    let _ = uploader
-                        .uploaded_registers
-                        .insert(*updated_register.address(), updated_register.clone());
-                }
-                uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register));
-            }
-            #[cfg(feature = "registers")]
-            TaskResult::PushRegisterErr(xorname) => {
-                // the register failed to be pushed. Retry until failure.
-                let _ = uploader.on_going_push_register.remove(&xorname);
-                uploader.pending_to_push_register.push(xorname);
-
-                uploader.push_register_errors += 1;
-                if uploader.push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS {
-                    error!("Max sequential network failures reached during PushRegisterErr.");
-                    return Err(UploadError::SequentialNetworkErrors);
-                }
-            }
-            TaskResult::GetStoreCostOk { xorname, quote } => {
-                let _ = uploader.on_going_get_cost.remove(&xorname);
-                uploader.get_store_cost_errors = 0; // reset error if Ok. We only throw error after 'n' sequential errors.
-
-                trace!("GetStoreCostOk for {xorname:?}'s store_cost {:?}", quote.2);
-
-                if !quote.2.cost.is_zero() {
-                    uploader.pending_to_pay.push((xorname, quote));
-                }
-                // if cost is 0, then it already exists in the network.
-                else {
-                    // remove the item since we have uploaded it.
-                    let removed_item =
-                        uploader.all_upload_items.remove(&xorname).ok_or_else(|| {
-                            error!("Uploadable item not found in all_upload_items: {xorname:?}");
-                            UploadError::InternalError
-                        })?;
-                    let _ = uploader.uploaded_addresses.insert(removed_item.address());
-                    trace!("{xorname:?} has store cost of 0 and it already exists on the network");
-                    uploader.skipped_count += 1;
-
-                    // if during the first try we skip the item, then it is already present in the network.
-                    match removed_item {
-                        #[cfg(feature = "data")]
-                        UploadItem::Chunk { address, .. } => {
-                            uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork(
-                                address,
-                            ));
-                        }
-                        #[cfg(feature = "registers")]
-                        UploadItem::Register { reg, .. } => {
-                            if uploader.cfg.collect_registers {
-                                let _ = uploader
-                                    .uploaded_registers
-                                    .insert(*reg.address(), reg.clone());
-                            }
-                            uploader.emit_upload_event(UploadEvent::RegisterUpdated(reg));
-                        }
-                    }
-                }
-            }
-            TaskResult::GetStoreCostErr {
-                xorname,
-                get_store_cost_strategy,
-                max_repayments_reached,
-            } => {
-                let _ = uploader.on_going_get_cost.remove(&xorname);
-                trace!("GetStoreCostErr for {xorname:?}, get_store_cost_strategy: {get_store_cost_strategy:?}, max_repayments_reached: {max_repayments_reached:?}");
-
-                // If max repayments reached, track it separately. Else retry get_store_cost.
-                if max_repayments_reached {
-                    error!("Max repayments reached for {xorname:?}. Skipping upload for it");
-                    uploader.max_repayments_reached.insert(xorname);
-                    uploader.all_upload_items.remove(&xorname);
-                } else {
-                    // use the same strategy. The repay-different-payee strategy is set only if the upload fails.
-                    uploader
-                        .pending_to_get_store_cost
-                        .push((xorname, get_store_cost_strategy.clone()));
-                }
-                uploader.get_store_cost_errors += 1;
-                if uploader.get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS {
-                    error!("Max sequential network failures reached during GetStoreCostErr.");
-                    return Err(UploadError::SequentialNetworkErrors);
-                }
-            }
-            TaskResult::MakePaymentsOk { payment_proofs } => {
-                let tokens_spent = payment_proofs
-                    .values()
-                    .map(|proof| proof.quote.cost.as_atto())
-                    .try_fold(Amount::from(0), |acc, cost| acc.checked_add(cost))
-                    .ok_or_else(|| {
-                        error!("Overflow when summing up tokens spent");
-                        UploadError::InternalError
-                    })?;
-                trace!(
-                    "MakePaymentsOk for {} items, with {tokens_spent:?} tokens.",
-                    payment_proofs.len(),
-                );
-                for xorname in payment_proofs.keys() {
-                    let _ = uploader.on_going_payments.remove(xorname);
-                }
-                uploader
-                    .pending_to_upload
-                    .extend(payment_proofs.keys().cloned());
-                for (xorname, proof) in payment_proofs {
-                    if let Some(payments) = uploader.payment_proofs.get_mut(&xorname) {
-                        payments.push(proof)
-                    } else {
-                        uploader.payment_proofs.insert(xorname, vec![proof]);
-                    }
-                }
-                // reset the sequential payment fail counter if ok. We throw an error if payment fails continuously
-                // more than MAX_SEQUENTIAL_PAYMENT_FAILS times.
-                uploader.make_payments_errors = 0;
-                uploader.tokens_spent = uploader
-                    .tokens_spent
-                    .checked_add(tokens_spent)
-                    .ok_or_else(|| {
-                        error!("Overflow when summing up tokens spent for summary.");
-                        UploadError::InternalError
-                    })?;
-
-                uploader.emit_upload_event(UploadEvent::PaymentMade { tokens_spent });
-            }
-            TaskResult::MakePaymentsErr { failed_xornames } => {
-                trace!("MakePaymentsErr for {:?} items", failed_xornames.len());
-                // TODO: handle insufficient balance error
-
-                for (xorname, quote) in failed_xornames {
-                    let _ = uploader.on_going_payments.remove(&xorname);
-                    uploader.pending_to_pay.push((xorname, quote));
-                }
-                uploader.make_payments_errors += 1;
-
-                if uploader.make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS {
-                    error!("Max sequential upload failures reached during MakePaymentsErr.");
-                    // Too many sequential overall payment failures, indicating an
-                    // unrecoverable failure: the spend tx is continuously rejected by the network.
-                    // The entire upload process shall be terminated.
-                    return Err(UploadError::SequentialUploadPaymentError);
-                }
-            }
-            TaskResult::UploadOk(xorname) => {
-                let _ = uploader.on_going_uploads.remove(&xorname);
-                uploader.uploaded_count += 1;
-                trace!("UploadOk for {xorname:?}");
-                // remove the previous payments
-                uploader.payment_proofs.remove(&xorname);
-                // remove the item since we have uploaded it.
-                let removed_item = uploader.all_upload_items.remove(&xorname).ok_or_else(|| {
-                    error!("Uploadable item not found in all_upload_items: {xorname:?}");
-                    UploadError::InternalError
-                })?;
-                let _ = uploader.uploaded_addresses.insert(removed_item.address());
-
-                match removed_item {
-                    #[cfg(feature = "data")]
-                    UploadItem::Chunk { address, .. } => {
-                        uploader.emit_upload_event(UploadEvent::ChunkUploaded(address));
-                    }
-                    #[cfg(feature = "registers")]
-                    UploadItem::Register { reg, .. } => {
-                        if uploader.cfg.collect_registers {
-                            let _ = uploader
-                                .uploaded_registers
-                                .insert(*reg.address(), reg.clone());
-                        }
-                        uploader.emit_upload_event(UploadEvent::RegisterUploaded(reg));
-                    }
-                }
-            }
-            TaskResult::UploadErr { xorname, io_error } => {
-                if let Some(io_error) = io_error {
-                    error!(
-                        "Upload failed for {xorname:?} with error: {io_error:?}. Stopping upload."
-                    );
-                    return Err(UploadError::Io(*io_error));
-                }
-
-                let _ = uploader.on_going_uploads.remove(&xorname);
-                debug!("UploadErr for {xorname:?}. Keeping track of failure and trying again.");
-
-                // keep track of the failure
-                let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0);
-                *n_errors += 1;
-
-                // if the quote has expired, don't retry the upload again. Instead, get the cheapest quote again.
-                if *n_errors > UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE {
-                    // if errors > threshold, then select a different payee, else retry again.
-                    // Also reset n_errors as we want to enable retries for the new payee.
-                    *n_errors = 0;
-                    debug!("Max error during upload reached for {xorname:?}. Selecting a different payee.");
Selecting a different payee."); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::SelectDifferentPayee)); - } else { - uploader.pending_to_upload.push(xorname); - } - } - } - } -} - -impl UploaderInterface for Uploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner - .take() - .expect("Uploader::new makes sure inner is present") - } - - fn submit_get_store_cost_task( - &mut self, - client: Client, - xorname: XorName, - address: NetworkAddress, - previous_payments: Option<&Vec>, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning get_store_cost for {xorname:?}"); - let previous_payments_to = if let Some(previous_payments) = previous_payments { - let peer_ids = previous_payments - .iter() - .map(|payment_proof| { - payment_proof - .to_peer_id_payee() - .ok_or_else(|| { - error!("Invalid payment proof found, could not obtain peer_id {payment_proof:?}"); - UploadError::InternalError - }) - }) - .collect::>>(); - peer_ids - } else { - Ok(vec![]) - }; - - let _handle = spawn(async move { - let task_result = match InnerUploader::get_store_cost( - client, - xorname, - address, - get_store_cost_strategy.clone(), - previous_payments_to, - max_repayments_for_failed_data, - ) - .await - { - Ok(quote) => { - debug!("StoreCosts retrieved for {xorname:?} quote: {quote:?}"); - TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(quote), - } - } - Err(err) => { - error!("Encountered error {err:?} when getting store_cost for {xorname:?}",); - - let max_repayments_reached = - matches!(&err, UploadError::MaximumRepaymentsReached { .. }); - - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } - } - }; - - let _ = task_result_sender.send(task_result).await; - }); - } - - #[cfg(feature = "registers")] - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - trace!("Spawning get_register for {xorname:?}"); - let _handle = spawn(async move { - let task_result = match InnerUploader::get_register(client, reg_addr).await { - Ok(register) => { - debug!("Register retrieved for {xorname:?}"); - TaskResult::GetRegisterFromNetworkOk { - remote_register: register, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - warn!("Encountered error {err:?} during get_register. The register has to be PUT as it is a new one."); - TaskResult::GetRegisterFromNetworkErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - #[cfg(feature = "registers")] - fn submit_push_register_task( - &mut self, - client: Client, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - trace!("Spawning push_register for {xorname:?}"); - let _handle = spawn(async move { - let task_result = match InnerUploader::push_register(client, upload_item, verify_store) - .await - { - Ok(reg) => { - debug!("Register pushed: {xorname:?}"); - TaskResult::PushRegisterOk { - updated_register: reg, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - error!("Encountered error {err:?} during push_register. 
The register might not be present in the network"); - TaskResult::PushRegisterErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ) { - let _handle = spawn(async move { - let _ = make_payment_sender.send(to_send).await; - }); - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - previous_payments: Option<&Vec>, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning upload item task for {:?}", upload_item.xorname()); - - let last_payment = previous_payments.and_then(|payments| payments.last().cloned()); - - let _handle = spawn(async move { - let xorname = upload_item.xorname(); - let result = InnerUploader::upload_item( - client, - upload_item, - last_payment, - verify_store, - retry_strategy, - ) - .await; - - trace!("Upload item {xorname:?} uploaded with result {result:?}"); - match result { - Ok(_) => { - let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; - } - Err(UploadError::Io(io_error)) => { - let _ = task_result_sender - .send(TaskResult::UploadErr { - xorname, - io_error: Some(Box::new(io_error)), - }) - .await; - } - Err(_) => { - let _ = task_result_sender - .send(TaskResult::UploadErr { - xorname, - io_error: None, - }) - .await; - } - }; - }); - } -} - -/// `Uploader` provides functionality for uploading both Chunks and Registers with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). -#[derive(custom_debug::Debug)] -pub(super) struct InnerUploader { - pub(super) cfg: UploadCfg, - #[debug(skip)] - pub(super) client: Client, - #[debug(skip)] - pub(super) wallet: EvmWallet, - - // states - pub(super) all_upload_items: HashMap, - #[cfg(feature = "registers")] - pub(super) pending_to_get_register: Vec, - #[cfg(feature = "registers")] - pub(super) pending_to_push_register: Vec, - pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>, - pub(super) pending_to_pay: Vec<(XorName, Box)>, - pub(super) pending_to_upload: Vec, - pub(super) payment_proofs: HashMap>, - - // trackers - #[cfg(feature = "registers")] - pub(super) on_going_get_register: HashSet, - #[cfg(feature = "registers")] - pub(super) on_going_push_register: HashSet, - pub(super) on_going_get_cost: HashSet, - pub(super) on_going_payments: HashSet, - pub(super) on_going_uploads: HashSet, - - // error trackers - pub(super) n_errors_during_uploads: HashMap, - #[cfg(feature = "registers")] - pub(super) push_register_errors: usize, - pub(super) get_store_cost_errors: usize, - pub(super) make_payments_errors: usize, - - // Upload summary - pub(super) tokens_spent: Amount, - pub(super) upload_final_balance: Amount, - pub(super) max_repayments_reached: HashSet, - pub(super) uploaded_addresses: HashSet, - #[cfg(feature = "registers")] - pub(super) uploaded_registers: HashMap, - pub(super) uploaded_count: usize, - pub(super) skipped_count: usize, - - // Task channels for testing. Not used in actual code. 
-    pub(super) testing_task_channels:
-        Option<(mpsc::Sender<TaskResult>, mpsc::Receiver<TaskResult>)>,
-
-    // Public events
-    #[debug(skip)]
-    pub(super) logged_event_sender_absence: bool,
-    #[debug(skip)]
-    pub(super) event_sender: Option<mpsc::Sender<UploadEvent>>,
-}
-
-impl InnerUploader {
-    pub(super) fn new(client: Client, wallet: EvmWallet) -> Self {
-        Self {
-            cfg: Default::default(),
-            client,
-            wallet,
-
-            all_upload_items: Default::default(),
-            #[cfg(feature = "registers")]
-            pending_to_get_register: Default::default(),
-            #[cfg(feature = "registers")]
-            pending_to_push_register: Default::default(),
-            pending_to_get_store_cost: Default::default(),
-            pending_to_pay: Default::default(),
-            pending_to_upload: Default::default(),
-            payment_proofs: Default::default(),
-
-            #[cfg(feature = "registers")]
-            on_going_get_register: Default::default(),
-            #[cfg(feature = "registers")]
-            on_going_push_register: Default::default(),
-            on_going_get_cost: Default::default(),
-            on_going_payments: Default::default(),
-            on_going_uploads: Default::default(),
-
-            n_errors_during_uploads: Default::default(),
-            #[cfg(feature = "registers")]
-            push_register_errors: Default::default(),
-            get_store_cost_errors: Default::default(),
-            max_repayments_reached: Default::default(),
-            make_payments_errors: Default::default(),
-
-            tokens_spent: Amount::from(0),
-            upload_final_balance: Amount::from(0),
-            uploaded_addresses: Default::default(),
-            #[cfg(feature = "registers")]
-            uploaded_registers: Default::default(),
-            uploaded_count: Default::default(),
-            skipped_count: Default::default(),
-
-            testing_task_channels: None,
-            logged_event_sender_absence: Default::default(),
-            event_sender: Default::default(),
-        }
-    }
-
-    // ====== Pop items ======
-
-    #[cfg(feature = "registers")]
-    fn pop_item_for_push_register(&mut self) -> Result<UploadItem> {
-        if let Some(name) = self.pending_to_push_register.pop() {
-            let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
-                error!("Uploadable item not found in all_upload_items: {name:?}");
-                UploadError::InternalError
-            })?;
-            Ok(upload_item)
-        } else {
-            // the caller will be making sure this does not happen.
-            error!("No item found for push register");
-            Err(UploadError::InternalError)
-        }
-    }
-
-    fn pop_item_for_get_store_cost(
-        &mut self,
-    ) -> Result<(XorName, NetworkAddress, GetStoreCostStrategy)> {
-        let (xorname, strategy) = self.pending_to_get_store_cost.pop().ok_or_else(|| {
-            error!("No item found for get store cost");
-            UploadError::InternalError
-        })?;
-        let address = self
-            .all_upload_items
-            .get(&xorname)
-            .map(|item| item.address())
-            .ok_or_else(|| {
-                error!("Uploadable item not found in all_upload_items: {xorname:?}");
-                UploadError::InternalError
-            })?;
-        Ok((xorname, address, strategy))
-    }
-
-    fn pop_item_for_make_payment(&mut self) -> Result<(UploadItem, Box<PayeeQuote>)> {
-        if let Some((name, quote)) = self.pending_to_pay.pop() {
-            let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| {
-                error!("Uploadable item not found in all_upload_items: {name:?}");
-                UploadError::InternalError
-            })?;
-            Ok((upload_item, quote))
-        } else {
-            // the caller will be making sure this does not happen.
- error!("No item found for make payment"); - Err(UploadError::InternalError) - } - } - - fn pop_item_for_upload_item(&mut self) -> Result { - if let Some(name) = self.pending_to_upload.pop() { - let upload_item = self.all_upload_items.get(&name).cloned().ok_or_else(|| { - error!("Uploadable item not found in all_upload_items: {name:?}"); - UploadError::InternalError - })?; - Ok(upload_item) - } else { - // the caller will be making sure this does not happen. - error!("No item found for upload item"); - Err(UploadError::InternalError) - } - } - - // ====== Processing Loop ====== - - // This is spawned as a long running task to prevent us from reading the wallet files - // each time we have to make a payment. - fn start_payment_processing_thread( - &self, - mut make_payment_receiver: mpsc::Receiver)>>, - task_result_sender: mpsc::Sender, - payment_batch_size: usize, - ) -> Result<()> { - let wallet = self.wallet.clone(); - - let _handle = spawn(async move { - debug!("Spawning the long running make payment processing loop."); - - let mut to_be_paid_list = Vec::new(); - let mut cost_map = HashMap::new(); - - let mut got_a_previous_force_payment = false; - while let Some(payment) = mpsc_recv(&mut make_payment_receiver).await { - let make_payments = if let Some((item, quote)) = payment { - to_be_paid_list.push(( - quote.2.hash(), - quote.2.rewards_address, - quote.2.cost.as_atto(), - )); - let xorname = item.xorname(); - debug!("Inserted {xorname:?} into to_be_paid_list"); - - let _ = cost_map.insert(xorname, (quote.0, quote.1, quote.2)); - cost_map.len() >= payment_batch_size || got_a_previous_force_payment - } else { - // using None to indicate as all paid. - let make_payments = !cost_map.is_empty(); - debug!("Got a forced forced round of make payment."); - // Note: There can be a mismatch of ordering between the main loop and the make payment loop because - // the instructions are sent via a task(channel.send().await). And there is no guarantee for the - // order to come in the same order as they were sent. - // - // We cannot just disobey the instruction inside the child loop, as the mainloop would be expecting - // a result back for a particular instruction. - if !make_payments { - got_a_previous_force_payment = true; - warn!( - "We were told to force make payment, but cost_map is empty, so we can't do that just yet. Waiting for a task to insert a quote into cost_map" - ) - } - - make_payments - }; - - if make_payments { - // reset force_make_payment - if got_a_previous_force_payment { - info!("A task inserted a quote into cost_map, so we can now make a forced round of payment!"); - got_a_previous_force_payment = false; - } - - let terminate_process = false; - let data_payments = std::mem::take(&mut to_be_paid_list); - - let result = match wallet.pay_for_quotes(data_payments).await { - Ok(payments) => { - trace!("Made payments for {} records.", payments.len()); - - let payment_proofs = - payment_proof_from_quotes_and_payments(&cost_map, &payments); - - TaskResult::MakePaymentsOk { payment_proofs } - } - Err(err) => { - let error = err.0; - let _succeeded_batch = err.1; - - error!("When paying {} data, got error {error:?}", cost_map.len(),); - // TODO: match on insufficient gas/token error. 
and set terminate_process = true - TaskResult::MakePaymentsErr { - failed_xornames: cost_map - .into_iter() - .map(|(k, v)| (k, Box::new(v))) - .collect(), - } - } - }; - let result_sender = task_result_sender.clone(); - let _handle = spawn(async move { - let _ = result_sender.send(result).await; - }); - - cost_map = HashMap::new(); - - if terminate_process { - // The error will trigger the entire upload process to be terminated. - // Hence here we shall terminate the inner loop first, - // to avoid the wallet going further to be potentially got corrupted. - warn!( - "Terminating make payment processing loop due to un-recoverable error." - ); - break; - } - } - } - debug!("Make payment processing loop terminated."); - }); - Ok(()) - } - - // ====== Logic ====== - - #[cfg(feature = "registers")] - async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result { - let reg = client.register_get(reg_addr).await?; - Ok(reg) - } - - #[cfg(feature = "registers")] - async fn push_register( - client: Client, - upload_item: UploadItem, - verify_store: bool, - ) -> Result { - let register = if let UploadItem::Register { reg, .. } = upload_item { - reg - } else { - error!("Invalid upload item found: {upload_item:?}"); - return Err(UploadError::InternalError); - }; - - let verification = if verify_store { - let get_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::default()), - target_record: None, - expected_holders: Default::default(), - is_register: true, - }; - Some((VerificationKind::Network, get_cfg)) - } else { - None - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: None, - verification, - }; - - client.register_upload(®ister, None, &put_cfg).await?; - - Ok(register) - } - - async fn get_store_cost( - client: Client, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - previous_payments_to: Result>, - max_repayments_for_failed_data: usize, - ) -> Result { - let filter_list = match get_store_cost_strategy { - GetStoreCostStrategy::Cheapest => vec![], - GetStoreCostStrategy::SelectDifferentPayee => { - let filter_list = previous_payments_to?; - - // if we have already made initial + max_repayments, then we should error out. - if Self::have_we_reached_max_repayments( - filter_list.len(), - max_repayments_for_failed_data, - ) { - // error is used by the caller. 
- return Err(UploadError::MaximumRepaymentsReached { - items: vec![xorname], - }); - } - - debug!("Filtering out payments from {filter_list:?} during get_store_cost for {xorname:?}"); - filter_list - } - }; - let quote = client - .network - .get_store_costs_from_network(address, filter_list) - .await?; - Ok(quote) - } - - async fn upload_item( - client: Client, - upload_item: UploadItem, - previous_payments: Option, - verify_store: bool, - retry_strategy: RetryStrategy, - ) -> Result<()> { - let xorname = upload_item.xorname(); - - let payment_proof = previous_payments.ok_or_else(|| { - error!("No payment proof found for {xorname:?}"); - UploadError::InternalError - })?; - let payee = payment_proof.to_peer_id_payee().ok_or_else(|| { - error!("Invalid payment proof found, could not obtain peer_id {payment_proof:?}"); - UploadError::InternalError - })?; - - debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment_proof:?}"); - - match upload_item { - #[cfg(feature = "data")] - UploadItem::Chunk { address: _, chunk } => { - let chunk = match chunk { - Either::Left(chunk) => chunk, - Either::Right(path) => { - let bytes = std::fs::read(&path).inspect_err(|err| { - error!("Error reading chunk at {path:?}: {err:?}"); - })?; - Chunk::new(Bytes::from(bytes)) - } - }; - - let verification = if verify_store { - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")), - retry_strategy: Some(retry_strategy), - target_record: None, - expected_holders: Default::default(), - is_register: false, - }; - - let random_nonce = thread_rng().gen::(); - let expected_proof = - ChunkProof::from_chunk(&chunk, random_nonce).map_err(|err| { - error!("Failed to create chunk proof: {err:?}"); - UploadError::Serialization(format!( - "Failed to create chunk proof for {xorname:?}" - )) - })?; - - Some(( - VerificationKind::ChunkProof { - expected_proof, - nonce: random_nonce, - }, - verification_cfg, - )) - } else { - None - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: Some(retry_strategy), - use_put_record_to: Some(vec![payee]), - verification, - }; - - debug!("Client upload started for chunk: {xorname:?}"); - client - .chunk_upload_with_payment(chunk, payment_proof, Some(put_cfg)) - .await?; - - debug!("Client upload completed for chunk: {xorname:?}"); - } - #[cfg(feature = "registers")] - UploadItem::Register { address: _, reg } => { - debug!("Client upload started for register: {xorname:?}"); - let verification = if verify_store { - let get_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: Some(retry_strategy), - target_record: None, - expected_holders: Default::default(), - is_register: true, - }; - Some((VerificationKind::Network, get_cfg)) - } else { - None - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: Some(retry_strategy), - use_put_record_to: Some(vec![payee]), - verification, - }; - client - .register_upload(®, Some(&payment_proof), &put_cfg) - .await?; - debug!("Client upload completed for register: {xorname:?}"); - } - } - - Ok(()) - } - - // ====== Misc ====== - - fn emit_upload_event(&mut self, event: UploadEvent) { - if let Some(sender) = self.event_sender.as_ref() { - let sender_clone = sender.clone(); - let _handle = spawn(async move { - if let Err(err) = sender_clone.send(event).await { - error!("Error emitting upload event: {err:?}"); - } - }); - } else if !self.logged_event_sender_absence { - info!("FilesUpload upload event sender is not set. 
Use get_upload_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - } - - /// If we have already made initial + max_repayments_allowed, then we should error out. - // separate function as it is used in test. - pub(super) fn have_we_reached_max_repayments( - payments_made: usize, - max_repayments_allowed: usize, - ) -> bool { - // if max_repayments_allowed = 1, then we have reached capacity = true if 2 payments have been made. i.e., - // i.e., 1 initial + 1 repayment. - payments_made > max_repayments_allowed - } - - fn validate_upload_cfg(&self) -> Result<()> { - if self.cfg.payment_batch_size > PAYMENT_BATCH_SIZE { - error!("Payment batch size is greater than the maximum allowed: {PAYMENT_BATCH_SIZE}"); - return Err(UploadError::InvalidCfg(format!( - "Payment batch size is greater than the maximum allowed: {PAYMENT_BATCH_SIZE}" - ))); - } - if self.cfg.payment_batch_size < 1 { - error!("Payment batch size cannot be less than 1"); - return Err(UploadError::InvalidCfg( - "Payment batch size cannot be less than 1".to_string(), - )); - } - if self.cfg.batch_size < 1 { - error!("Batch size cannot be less than 1"); - return Err(UploadError::InvalidCfg( - "Batch size cannot be less than 1".to_string(), - )); - } - - Ok(()) - } -} diff --git a/autonomi/src/utils.rs b/autonomi/src/utils.rs index a7273f9bae..fc9ceb7718 100644 --- a/autonomi/src/utils.rs +++ b/autonomi/src/utils.rs @@ -1,15 +1,14 @@ -use sn_evm::{ProofOfPayment, QuoteHash, TxHash}; -use sn_networking::PayeeQuote; +use sn_evm::{PaymentQuote, ProofOfPayment, QuoteHash, TxHash}; use std::collections::{BTreeMap, HashMap}; use xor_name::XorName; pub fn payment_proof_from_quotes_and_payments( - quotes: &HashMap, + quotes: &HashMap, payments: &BTreeMap, ) -> HashMap { quotes .iter() - .filter_map(|(xor_name, (_, _, quote))| { + .filter_map(|(xor_name, quote)| { payments.get("e.hash()).map(|tx_hash| { ( *xor_name, diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs index 70787dee0f..b952852bc2 100644 --- a/autonomi/tests/fs.rs +++ b/autonomi/tests/fs.rs @@ -97,7 +97,7 @@ async fn file_into_vault() -> Result<()> { .await?; // now assert over the stored account packet - let new_client = Client::connect(&peers_from_env()?).await?; + let new_client = Client::connect(&[]).await?; let (ap, got_version) = new_client.fetch_and_decrypt_vault(&client_sk).await?; assert_eq!(set_version, got_version); diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index a68fe4a01e..49956db39e 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -13,7 +13,6 @@ pub use evmlib::common::Address as RewardsAddress; pub use evmlib::common::Address as EvmAddress; pub use evmlib::common::QuotePayment; pub use evmlib::common::{QuoteHash, TxHash}; -pub use evmlib::contract::network_token::Error as EvmNetworkTokenError; pub use evmlib::cryptography; #[cfg(feature = "external-signer")] pub use evmlib::external_signer; diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 9c76065bf0..4f2270ff37 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -94,7 +94,6 @@ workspace = true crate-type = ["cdylib", "rlib"] [target.'cfg(target_arch = "wasm32")'.dependencies] -async-channel = "2.3.1" getrandom = { version = "0.2.12", features = ["js"] } libp2p = { version = "0.54.1", features = [ "tokio", diff --git a/sn_networking/src/target_arch.rs b/sn_networking/src/target_arch.rs index b53ce472c5..35a1b62092 100644 --- a/sn_networking/src/target_arch.rs +++ b/sn_networking/src/target_arch.rs @@ 
-19,33 +19,12 @@ pub use tokio::{ #[cfg(target_arch = "wasm32")] pub use std::time::Duration; -#[cfg(target_arch = "wasm32")] -pub use wasm_bindgen_futures::spawn_local as spawn; + #[cfg(target_arch = "wasm32")] pub use wasmtimer::{ std::{Instant, SystemTime, UNIX_EPOCH}, tokio::{interval, sleep, timeout, Interval}, }; -/// === Channels ==== - -#[cfg(not(target_arch = "wasm32"))] -pub use tokio::sync::mpsc; -#[cfg(not(target_arch = "wasm32"))] -pub use tokio::sync::mpsc::channel as mpsc_channel; - -#[cfg(not(target_arch = "wasm32"))] -pub async fn mpsc_recv(mpsc: &mut mpsc::Receiver) -> Option { - mpsc.recv().await -} - -// futures crate has different function signatures than tokio, so instead we use async_channel here. -#[cfg(target_arch = "wasm32")] -pub use async_channel as mpsc; #[cfg(target_arch = "wasm32")] -pub use async_channel::bounded as mpsc_channel; - -#[cfg(target_arch = "wasm32")] -pub async fn mpsc_recv(mpsc: &mut mpsc::Receiver) -> Option { - mpsc.recv().await.ok() -} +pub use wasm_bindgen_futures::spawn_local as spawn; diff --git a/sn_protocol/src/messages/chunk_proof.rs b/sn_protocol/src/messages/chunk_proof.rs index 4fa3900d1f..145aae00de 100644 --- a/sn_protocol/src/messages/chunk_proof.rs +++ b/sn_protocol/src/messages/chunk_proof.rs @@ -6,8 +6,6 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::storage::{try_serialize_record, Chunk, RecordKind}; -use crate::Error; use serde::{Deserialize, Serialize}; use std::fmt; @@ -26,13 +24,6 @@ impl ChunkProof { ChunkProof(hash) } - pub fn from_chunk(chunk: &Chunk, nonce: Nonce) -> Result { - let stored_on_node = try_serialize_record(chunk, RecordKind::Chunk)?.to_vec(); - let proof = ChunkProof::new(&stored_on_node, nonce); - - Ok(proof) - } - pub fn verify(&self, other_proof: &ChunkProof) -> bool { self.0 == other_proof.0 } From 8fa5d603f048b57af864f669792da5e73dd5f4fd Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 09:31:58 +0900 Subject: [PATCH 097/128] chore: unrelevant -> irrelevant --- sn_networking/src/cmd.rs | 10 +++++----- sn_networking/src/lib.rs | 4 ++-- sn_networking/src/record_store.rs | 2 +- sn_networking/src/record_store_api.rs | 6 +++--- sn_node/src/node.rs | 8 ++++---- sn_node/src/replication.rs | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index e2d92edc22..5ae19c7a4a 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -141,7 +141,7 @@ pub enum LocalSwarmCmd { /// NOTE: This does result in outgoing messages, but is produced locally TriggerIntervalReplication, /// Triggers unrelevant record cleanup - TriggerUnrelevantRecordCleanup, + TriggerIrrelevantRecordCleanup, } /// Commands to send to the Swarm @@ -292,7 +292,7 @@ impl Debug for LocalSwarmCmd { LocalSwarmCmd::TriggerIntervalReplication => { write!(f, "LocalSwarmCmd::TriggerIntervalReplication") } - LocalSwarmCmd::TriggerUnrelevantRecordCleanup => { + LocalSwarmCmd::TriggerIrrelevantRecordCleanup => { write!(f, "LocalSwarmCmd::TriggerUnrelevantRecordCleanup") } } @@ -848,13 +848,13 @@ impl SwarmDriver { self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch)); } } - LocalSwarmCmd::TriggerUnrelevantRecordCleanup => { - cmd_string = "TriggerUnrelevantRecordCleanup"; + LocalSwarmCmd::TriggerIrrelevantRecordCleanup => { + cmd_string = "TriggerIrrelevantRecordCleanup"; self.swarm 
.behaviour_mut() .kademlia .store_mut() - .cleanup_unrelevant_records(); + .cleanup_irrelevant_records(); } } diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index a275567c05..01e5d6c9f6 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -1152,8 +1152,8 @@ impl Network { self.send_local_swarm_cmd(LocalSwarmCmd::QuoteVerification { quotes }); } - pub fn trigger_unrelevant_record_cleanup(&self) { - self.send_local_swarm_cmd(LocalSwarmCmd::TriggerUnrelevantRecordCleanup) + pub fn trigger_irrelevant_record_cleanup(&self) { + self.send_local_swarm_cmd(LocalSwarmCmd::TriggerIrrelevantRecordCleanup) } /// Helper to send NetworkSwarmCmd diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 394042d794..fcd6ce99f9 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -454,7 +454,7 @@ impl NodeRecordStore { // those `out of range` records shall be cleaned up. // This is to avoid `over-quoting` during restart, when RT is not fully populated, // result in mis-calculation of relevant records. - pub fn cleanup_unrelevant_records(&mut self) { + pub fn cleanup_irrelevant_records(&mut self) { let accumulated_records = self.records.len(); if accumulated_records < MAX_RECORDS_COUNT * 6 / 10 { return; diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 64fd790ccd..53cea6701e 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -159,12 +159,12 @@ impl UnifiedRecordStore { }; } - pub(crate) fn cleanup_unrelevant_records(&mut self) { + pub(crate) fn cleanup_irrelevant_records(&mut self) { match self { Self::Client(_store) => { - warn!("Calling cleanup_unrelevant_records at Client. This should not happen"); + warn!("Calling cleanup_irrelevant_records at Client. This should not happen"); } - Self::Node(store) => store.cleanup_unrelevant_records(), + Self::Node(store) => store.cleanup_irrelevant_records(), } } } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 0d74551751..3ac1aae6bb 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -272,9 +272,9 @@ impl Node { tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL); let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately - let mut unrelevant_records_cleanup_interval = + let mut irrelevant_records_cleanup_interval = tokio::time::interval(UNRELEVANT_RECORDS_CLEANUP_INTERVAL); - let _ = unrelevant_records_cleanup_interval.tick().await; // first tick completes immediately + let _ = irrelevant_records_cleanup_interval.tick().await; // first tick completes immediately loop { let peers_connected = &peers_connected; @@ -333,11 +333,11 @@ impl Node { let _ = metrics_recorder.uptime.set(metrics_recorder.started_instant.elapsed().as_secs() as i64); } } - _ = unrelevant_records_cleanup_interval.tick() => { + _ = irrelevant_records_cleanup_interval.tick() => { let network = self.network().clone(); let _handle = spawn(async move { - Self::trigger_unrelevant_record_cleanup(network); + Self::trigger_irrelevant_record_cleanup(network); }); } } diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 80ec25b157..bc3496b750 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -29,8 +29,8 @@ impl Node { } /// Cleanup unrelevant records if accumulated too many. 
- pub(crate) fn trigger_unrelevant_record_cleanup(network: Network) { - network.trigger_unrelevant_record_cleanup() + pub(crate) fn trigger_irrelevant_record_cleanup(network: Network) { + network.trigger_irrelevant_record_cleanup() } /// Get the Record from a peer or from the network without waiting. From 4fd95c190e65a2315db5753712b662eb3a532287 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 09:59:28 +0900 Subject: [PATCH 098/128] feat(networking): par_iter for relevant record check --- sn_networking/src/record_store.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index fcd6ce99f9..cb9a8946db 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -700,12 +700,16 @@ impl NodeRecordStore { self.records.len() ); + // Pre-calculate the local_key distance once + let local_key_distance = |key: &&Key| { + let kbucket_key = KBucketKey::new(key.to_vec()); + self.local_key.distance(&kbucket_key) + }; + + // Use par_iter() for parallel processing and any() to short-circuit let relevant_records_len = records - .iter() - .filter(|key| { - let kbucket_key = KBucketKey::new(key.to_vec()); - distance_range >= self.local_key.distance(&kbucket_key) - }) + .par_iter() // Process in parallel + .filter(|key| distance_range >= local_key_distance(key)) .count(); Marker::CloseRecordsLen(relevant_records_len).log(); From 6602249a4bd17c16948d7331a483b262449eb5a9 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 21 Oct 2024 13:06:32 +0900 Subject: [PATCH 099/128] fix(networking): speed up get_filtered_peers_exceeding_range --- sn_networking/src/cmd.rs | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 5ae19c7a4a..3f8d0693c4 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -29,6 +29,7 @@ use sn_protocol::{ NetworkAddress, PrettyPrintRecordKey, }; use std::{ + cmp::Ordering, collections::{BTreeMap, HashMap}, fmt::Debug, time::Duration, @@ -980,26 +981,30 @@ impl SwarmDriver { let acceptable_distance_range = self.get_request_range(); let target_key = target_address.as_kbucket_key(); - let peers = self + let sorted_peers: Vec<_> = self .swarm .behaviour_mut() .kademlia .get_closest_local_peers(&target_key) - .filter_map(|key| { - // here we compare _bucket_, not the exact distance. - // We want to include peers that are just outside the range - // Such that we can and will exceed the range in a search eventually - if acceptable_distance_range.ilog2() < target_key.distance(&key).ilog2() { - return None; - } + .collect(); - // Map KBucketKey to PeerId. 
- let peer_id = key.into_preimage(); - Some(peer_id) + // Binary search to find the index where we exceed the acceptable range + let split_index = sorted_peers + .binary_search_by(|key| { + let distance = target_key.distance(key); + if distance >= acceptable_distance_range { + Ordering::Greater + } else { + Ordering::Less + } }) - .collect::>(); + .unwrap_or_else(|x| x); - peers + // Convert KBucketKey to PeerId for all peers within range + sorted_peers[..split_index] + .iter() + .map(|key| key.into_preimage()) + .collect() } /// From all local peers, returns any within current get_range for a given key From 23cf0f43faac7a8a2a2ca848270b70ea14e50eb9 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 10:33:59 +0900 Subject: [PATCH 100/128] feat(networking): maintain records by bucket list This allows for easier searching for relevant records, removing a lot of distance computation --- sn_networking/src/record_store.rs | 110 +++++++++++++++++------------- 1 file changed, 62 insertions(+), 48 deletions(-) diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index cb9a8946db..254ec6380a 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -22,7 +22,7 @@ use libp2p::{ identity::PeerId, kad::{ store::{Error, RecordStore, Result}, - KBucketDistance as Distance, KBucketKey, ProviderRecord, Record, RecordKey as Key, + KBucketDistance as Distance, ProviderRecord, Record, RecordKey as Key, }, }; #[cfg(feature = "open-metrics")] @@ -70,14 +70,14 @@ const MIN_STORE_COST: u64 = 1; /// A `RecordStore` that stores records on disk. pub struct NodeRecordStore { - /// The identity of the peer owning the store. - local_key: KBucketKey, /// The address of the peer owning the store local_address: NetworkAddress, /// The configuration of the store. config: NodeRecordStoreConfig, - /// A set of keys, each corresponding to a data `Record` stored on disk. + /// Main records store remains unchanged for compatibility records: HashMap, + /// Additional index organizing records by distance bucket + records_by_bucket: HashMap>, /// FIFO simple cache of records to reduce read times records_cache: VecDeque, /// A map from record keys to their indices in the cache @@ -284,10 +284,10 @@ impl NodeRecordStore { let cache_size = config.records_cache_size; let mut record_store = NodeRecordStore { - local_key: KBucketKey::from(local_id), local_address: NetworkAddress::from_peer(local_id), config, records, + records_by_bucket: HashMap::new(), records_cache: VecDeque::with_capacity(cache_size), records_cache_map: HashMap::with_capacity(cache_size), network_event_sender, @@ -466,29 +466,25 @@ impl NodeRecordStore { return; }; - let mut removed_keys = Vec::new(); - self.records.retain(|key, _val| { - let kbucket_key = KBucketKey::new(key.to_vec()); - let is_in_range = responsible_range >= self.local_key.distance(&kbucket_key); - if !is_in_range { - removed_keys.push(key.clone()); - } - is_in_range - }); + let max_bucket = responsible_range.ilog2().unwrap_or_default(); + + // Collect keys to remove from buckets beyond our range + let keys_to_remove: Vec = self + .records_by_bucket + .iter() + .filter(|(&bucket, _)| bucket > max_bucket) + .flat_map(|(_, keys)| keys.iter().cloned()) + .collect(); - // Each `remove` function call will try to re-calculate furthest - // when the key to be removed is the current furthest. - // To avoid duplicated calculation, hence reset `furthest` first here. 
- self.farthest_record = self.calculate_farthest(); + let keys_to_remove_len = keys_to_remove.len(); - for key in removed_keys.iter() { - // Deletion from disk will be undertaken as a spawned task, - // hence safe to call this function repeatedly here. - self.remove(key); + // Remove collected keys + for key in keys_to_remove { + self.remove(&key); } info!("Cleaned up {} unrelevant records, among the original {accumulated_records} accumulated_records", - removed_keys.len()); + keys_to_remove_len); } } @@ -517,17 +513,26 @@ impl NodeRecordStore { /// to return the record as stored. pub(crate) fn mark_as_stored(&mut self, key: Key, record_type: RecordType) { let addr = NetworkAddress::from_record_key(&key); - let _ = self - .records + let distance = self.local_address.distance(&addr); + let bucket = distance.ilog2().unwrap_or_default(); + + // Update main records store + self.records .insert(key.clone(), (addr.clone(), record_type)); - let key_distance = self.local_address.distance(&addr); + // Update bucket index + self.records_by_bucket + .entry(bucket) + .or_default() + .insert(key.clone()); + + // Update farthest record if needed (unchanged) if let Some((_farthest_record, farthest_record_distance)) = self.farthest_record.clone() { - if key_distance > farthest_record_distance { - self.farthest_record = Some((key, key_distance)); + if distance > farthest_record_distance { + self.farthest_record = Some((key, distance)); } } else { - self.farthest_record = Some((key, key_distance)); + self.farthest_record = Some((key, distance)); } } @@ -692,28 +697,21 @@ impl NodeRecordStore { /// Calculate how many records are stored within a distance range pub fn get_records_within_distance_range( &self, - records: HashSet<&Key>, - distance_range: Distance, + _records: HashSet<&Key>, + max_distance: Distance, ) -> usize { - debug!( - "Total record count is {:?}. Distance is: {distance_range:?}", - self.records.len() - ); + let max_bucket = max_distance.ilog2().unwrap_or_default(); - // Pre-calculate the local_key distance once - let local_key_distance = |key: &&Key| { - let kbucket_key = KBucketKey::new(key.to_vec()); - self.local_key.distance(&kbucket_key) - }; + let within_range = self + .records_by_bucket + .iter() + .filter(|(&bucket, _)| bucket <= max_bucket) + .map(|(_, keys)| keys.len()) + .sum(); - // Use par_iter() for parallel processing and any() to short-circuit - let relevant_records_len = records - .par_iter() // Process in parallel - .filter(|key| distance_range >= local_key_distance(key)) - .count(); + Marker::CloseRecordsLen(within_range).log(); - Marker::CloseRecordsLen(relevant_records_len).log(); - relevant_records_len + within_range } /// Setup the distance range. 
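The `records_by_bucket` index above trades a per-key XOR distance computation for a lookup keyed on the `ilog2` of the distance, so counting records inside a range collapses to summing whole buckets. A minimal self-contained sketch of that idea, with hypothetical `Key` and `Bucket` stand-ins rather than the crate's real `RecordKey`/`KBucketDistance` types:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical stand-ins for the crate's RecordKey and the ilog2 of a
// KBucketDistance; illustrative only.
type Key = Vec<u8>;
type Bucket = u32;

#[derive(Default)]
struct BucketIndex {
    by_bucket: HashMap<Bucket, HashSet<Key>>,
}

impl BucketIndex {
    // Index a record under the ilog2 of its XOR distance to our address.
    fn insert(&mut self, key: Key, distance_ilog2: Bucket) {
        self.by_bucket.entry(distance_ilog2).or_default().insert(key);
    }

    // Counting records within a distance range becomes a sum over whole
    // buckets; no per-key distance is computed at query time.
    fn count_within(&self, max_bucket: Bucket) -> usize {
        self.by_bucket
            .iter()
            .filter(|(bucket, _)| **bucket <= max_bucket)
            .map(|(_, keys)| keys.len())
            .sum()
    }
}
```

The trade-off is that removal must also maintain the index, which is what the `remove` change in the next hunk takes care of.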
@@ -811,7 +809,23 @@ impl RecordStore for NodeRecordStore { } fn remove(&mut self, k: &Key) { - let _ = self.records.remove(k); + // Remove from main store + if let Some((addr, _)) = self.records.remove(k) { + // Remove from bucket index + let bucket = self + .local_address + .distance(&addr) + .ilog2() + .unwrap_or_default(); + if let Some(bucket_keys) = self.records_by_bucket.get_mut(&bucket) { + bucket_keys.remove(k); + // Clean up empty buckets + if bucket_keys.is_empty() { + self.records_by_bucket.remove(&bucket); + } + } + } + self.records_cache.retain(|r| r.key != *k); #[cfg(feature = "open-metrics")] From 1b77e7a56e04f5e15e45571aadaaba72ec175b34 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 10:46:38 +0900 Subject: [PATCH 101/128] feat(networking): throttle replication --- sn_networking/src/cmd.rs | 14 ++++++++++++++ sn_networking/src/driver.rs | 4 ++++ 2 files changed, 18 insertions(+) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 3f8d0693c4..50063d5388 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -43,6 +43,9 @@ const MAX_CONTINUOUS_HDD_WRITE_ERROR: usize = 5; // Shall be synced with `sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S` const REPLICATION_TIMEOUT: Duration = Duration::from_secs(45); +// Add this constant at the top of the file with other constants +const MIN_REPLICATION_INTERVAL: Duration = Duration::from_secs(60); // one minute + #[derive(Debug, Eq, PartialEq)] pub enum NodeIssue { /// Data Replication failed @@ -1038,6 +1041,17 @@ impl SwarmDriver { } fn try_interval_replication(&mut self) -> Result<()> { + // Add a last_replication field to track the last time replication was performed + if let Some(last_replication) = self.last_replication { + if last_replication.elapsed() < MIN_REPLICATION_INTERVAL { + info!("Skipping replication as minimum interval hasn't elapsed"); + return Ok(()); + } + } + + // Store the current time as the last replication time + self.last_replication = Some(Instant::now()); + let our_address = NetworkAddress::from_peer(self.self_peer_id); let mut replicate_targets = diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index f88157766e..b6917919fe 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -720,6 +720,7 @@ impl NetworkBuilder { replication_targets: Default::default(), range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT), first_contact_made: false, + last_replication: None, }; let network = Network::new( @@ -818,6 +819,9 @@ pub struct SwarmDriver { pub(crate) quotes_history: BTreeMap, pub(crate) replication_targets: BTreeMap, + /// when was the last replication event + /// This allows us to throttle replication no matter how it is triggered + pub(crate) last_replication: Option, // The recent range_distances calculated by the node // Each update is generated when there is a routing table change // We use the largest of these X_STORAGE_LIMIT values as our X distance. 
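The throttle introduced above is a plain time-gate: record when the work last ran and refuse to run again until a minimum interval has elapsed, however often the trigger fires. A minimal sketch of the pattern using only the standard library (`Throttle` and the 30-second constant are illustrative names, not the crate's API):

```rust
use std::time::{Duration, Instant};

const MIN_INTERVAL: Duration = Duration::from_secs(30);

#[derive(Default)]
struct Throttle {
    last_run: Option<Instant>,
}

impl Throttle {
    // Returns true (and records the time) only if enough time has passed
    // since the previous successful run.
    fn try_run(&mut self) -> bool {
        if let Some(last) = self.last_run {
            if last.elapsed() < MIN_INTERVAL {
                return false; // skip: ran too recently
            }
        }
        self.last_run = Some(Instant::now());
        true
    }
}
```

Keeping the timestamp inside the `SwarmDriver` rather than in any one caller means the cap holds no matter which code path requests replication, which is what the new `last_replication` field comment describes.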
From d87560f00f40d4cfccea66bc900a1e4e6aad5119 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 13:11:48 +0900 Subject: [PATCH 102/128] feat(networking): reduce replication interval, decrease throttle --- sn_networking/src/cmd.rs | 6 +++--- sn_node/src/node.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 50063d5388..48cb8f1307 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -43,8 +43,8 @@ const MAX_CONTINUOUS_HDD_WRITE_ERROR: usize = 5; // Shall be synced with `sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S` const REPLICATION_TIMEOUT: Duration = Duration::from_secs(45); -// Add this constant at the top of the file with other constants -const MIN_REPLICATION_INTERVAL: Duration = Duration::from_secs(60); // one minute +// Throttles replication to at most once every 30 seconds +const MIN_REPLICATION_INTERVAL_S: Duration = Duration::from_secs(30); #[derive(Debug, Eq, PartialEq)] pub enum NodeIssue { @@ -1043,7 +1043,7 @@ impl SwarmDriver { fn try_interval_replication(&mut self) -> Result<()> { // Add a last_replication field to track the last time replication was performed if let Some(last_replication) = self.last_replication { - if last_replication.elapsed() < MIN_REPLICATION_INTERVAL { + if last_replication.elapsed() < MIN_REPLICATION_INTERVAL_S { info!("Skipping replication as minimum interval hasn't elapsed"); return Ok(()); } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 3ac1aae6bb..b44d281eff 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -45,7 +45,7 @@ use sn_evm::EvmNetwork; /// Interval to trigger replication of all records to all peers. /// This is the max time it should take. Minimum interval at any node will be half this -pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45; +pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 180; /// Interval to trigger bad node detection. /// This is the max time it should take. 
Minimum interval at any node will be half this From de5ea2b4f542f7741141ebbbc332299666101f57 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 13:45:49 +0900 Subject: [PATCH 103/128] feat(networking): improve add_keys perf for repl fetcher --- sn_networking/src/replication_fetcher.rs | 90 +++++++++++++----------- 1 file changed, 48 insertions(+), 42 deletions(-) diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 5e0d3a3ad4..0dcd1e71b5 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -80,43 +80,43 @@ impl ReplicationFetcher { locally_stored_keys: &HashMap, all_local_peers: &[PeerId], ) -> Vec<(PeerId, RecordKey)> { - // remove locally stored from incoming_keys - let mut new_incoming_keys: Vec<_> = incoming_keys - .iter() - .filter(|(addr, record_type)| { - let key = &addr.to_record_key(); - !locally_stored_keys.contains_key(key) - && !self - .to_be_fetched - .contains_key(&(key.clone(), record_type.clone(), holder)) - }) - .cloned() - .collect(); - - self.remove_stored_keys(locally_stored_keys); + // Pre-calculate self_address since it's used multiple times let self_address = NetworkAddress::from_peer(self.self_peer_id); - let total_incoming_keys = new_incoming_keys.len(); + let total_incoming_keys = incoming_keys.len(); - // In case of node full, restrict fetch range - if let Some(farthest_distance) = self.farthest_acceptable_distance { - let mut out_of_range_keys = vec![]; - new_incoming_keys.retain(|(addr, _)| { - let is_in_range = self_address.distance(addr) <= farthest_distance; - if !is_in_range { - out_of_range_keys.push(addr.clone()); - } - is_in_range - }); + // Avoid multiple allocations by using with_capacity + let mut new_incoming_keys = Vec::with_capacity(incoming_keys.len()); + let mut keys_to_fetch = Vec::new(); + let mut out_of_range_keys = Vec::new(); + + // Single pass filtering instead of multiple retain() calls + for (addr, record_type) in incoming_keys { + let key = addr.to_record_key(); - info!("Node is full, among {total_incoming_keys} incoming replications from {holder:?}, found {} beyond current farthest", out_of_range_keys.len()); - for addr in out_of_range_keys.iter() { - debug!("Node is full, the incoming record_key {addr:?} is beyond current farthest record"); + // Skip if locally stored or already pending fetch + if locally_stored_keys.contains_key(&key) + || self + .to_be_fetched + .contains_key(&(key.clone(), record_type.clone(), holder)) + { + continue; } + + // Check distance constraints + if let Some(farthest_distance) = self.farthest_acceptable_distance { + if self_address.distance(&addr) > farthest_distance { + out_of_range_keys.push(addr); + continue; + } + } + + new_incoming_keys.push((addr, record_type)); } - let mut keys_to_fetch = vec![]; - // For new data, it will be replicated out in a special replication_list of length 1. - // And we shall `fetch` that copy immediately (if in range), if it's not being fetched. + // Remove any outdated entries in `to_be_fetched` + self.remove_stored_keys(locally_stored_keys); + + // Special case for single new key if new_incoming_keys.len() == 1 { let (record_address, record_type) = new_incoming_keys[0].clone(); @@ -135,8 +135,6 @@ impl ReplicationFetcher { self.to_be_fetched .retain(|_, time_out| *time_out > Instant::now()); - let mut out_of_range_keys = vec![]; - // Filter out those out_of_range ones among the incoming_keys. 
if let Some(ref distance_range) = self.distance_range { new_incoming_keys.retain(|(addr, _record_type)| { @@ -168,9 +166,11 @@ impl ReplicationFetcher { if !out_of_range_keys.is_empty() { info!("Among {total_incoming_keys} incoming replications from {holder:?}, found {} out of range", out_of_range_keys.len()); - for addr in out_of_range_keys.iter() { - let ilog2_distance = self_address.distance(addr).ilog2(); - debug!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); + if tracing::log::log_enabled!(tracing::log::Level::Debug) { + for addr in out_of_range_keys.iter() { + let ilog2_distance = self_address.distance(addr).ilog2(); + debug!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); + } } } @@ -279,14 +279,20 @@ impl ReplicationFetcher { return vec![]; } - if !self.to_be_fetched.is_empty() { - debug!( - "Number of records still to be retrieved: {:?}", - self.to_be_fetched.len() - ); + // early return if nothing there + if self.to_be_fetched.is_empty() { + return vec![]; } - let mut data_to_fetch = vec![]; + debug!( + "Number of records still to be retrieved: {:?}", + self.to_be_fetched.len() + ); + + // Pre-allocate vectors with known capacity + let remaining_capacity = MAX_PARALLEL_FETCH - self.on_going_fetches.len(); + let mut data_to_fetch = Vec::with_capacity(remaining_capacity); + // Sort to_be_fetched by key closeness to our PeerId let mut to_be_fetched_sorted: Vec<_> = self.to_be_fetched.iter_mut().collect(); From 6e0beefb0659bba349b6b8bd5a43d5aafda4c005 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 14:57:55 +0900 Subject: [PATCH 104/128] chore(networking): remove debug only work --- sn_networking/src/replication_fetcher.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 0dcd1e71b5..1858d65350 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -166,12 +166,6 @@ impl ReplicationFetcher { if !out_of_range_keys.is_empty() { info!("Among {total_incoming_keys} incoming replications from {holder:?}, found {} out of range", out_of_range_keys.len()); - if tracing::log::log_enabled!(tracing::log::Level::Debug) { - for addr in out_of_range_keys.iter() { - let ilog2_distance = self_address.distance(addr).ilog2(); - debug!("The incoming record_key {addr:?} is out of range with ilog2_distance being {ilog2_distance:?}, do not fetch it from {holder:?}"); - } - } } // add in-range AND non existing keys to the fetcher From 7d6885204b35123991f1f7ca426cb5568c37370c Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 15:27:32 +0900 Subject: [PATCH 105/128] chore(networking): reduce CONNECT_TIMEOUT --- autonomi/src/client/mod.rs | 2 +- sn_networking/src/driver.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 172fb9ba4f..0933d70f86 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -42,7 +42,7 @@ use std::{collections::HashSet, sync::Arc, time::Duration}; use tokio::sync::mpsc; /// Time before considering the connection timed out. 
-pub const CONNECT_TIMEOUT_SECS: u64 = 20; +pub const CONNECT_TIMEOUT_SECS: u64 = 10; const CLIENT_EVENT_CHANNEL_SIZE: usize = 100; diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index b6917919fe..30386e14d8 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -121,7 +121,7 @@ pub const MAX_PACKET_SIZE: usize = 1024 * 1024 * 5; // the chunk size is 1mb, so // Timeout for requests sent/received through the request_response behaviour. const REQUEST_TIMEOUT_DEFAULT_S: Duration = Duration::from_secs(30); // Sets the keep-alive timeout of idle connections. -const CONNECTION_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(30); +const CONNECTION_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(10); // Inverval of resending identify to connected peers. const RESEND_IDENTIFY_INVERVAL: Duration = Duration::from_secs(3600); From f0d1c3dc30a02009f36740a8dc4a00600bc3edaa Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 28 Oct 2024 20:59:47 +0900 Subject: [PATCH 106/128] chore(networking): do not print all keys, verify one key at once --- sn_networking/src/event/mod.rs | 4 +- sn_networking/src/event/request_response.rs | 43 ++++++++----- sn_node/src/node.rs | 68 +++++++++------------ 3 files changed, 59 insertions(+), 56 deletions(-) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 7af3b268c5..e1d8074d29 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -146,7 +146,7 @@ pub enum NetworkEvent { /// Carry out chunk proof check against the specified record and peer ChunkProofVerification { peer_id: PeerId, - keys_to_verify: Vec, + key_to_verify: NetworkAddress, }, } @@ -208,7 +208,7 @@ impl Debug for NetworkEvent { } NetworkEvent::ChunkProofVerification { peer_id, - keys_to_verify, + key_to_verify: keys_to_verify, } => { write!( f, diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index b0c9344724..c46caa756e 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -250,14 +250,19 @@ impl SwarmDriver { if keys_to_verify.is_empty() { debug!("No valid candidate to be checked against peer {holder:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); + } else { + // choose one random key to verify + let key_to_verify = + keys_to_verify[OsRng.gen_range(0..keys_to_verify.len())].clone(); + if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + key_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } } // In additon to verify the sender, we also verify a random close node. 
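Sampling a single random key per round, instead of proving every candidate, keeps the challenge traffic constant while repeated rounds still cover the whole set over time. A sketch of just the selection step, assuming the `rand` crate (`pick_one` is an illustrative helper, not part of the codebase):

```rust
use rand::{rngs::OsRng, Rng};

// Challenge one randomly chosen candidate per verification round; across
// many rounds every key is still sampled with high probability.
fn pick_one<T: Clone>(candidates: &[T]) -> Option<T> {
    if candidates.is_empty() {
        return None;
    }
    Some(candidates[OsRng.gen_range(0..candidates.len())].clone())
}
```

The same selection is applied in the next hunk to the random close-node check as well.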
@@ -281,14 +286,20 @@ impl SwarmDriver { if keys_to_verify.is_empty() { debug!("No valid candidate to be checked against peer {candidate:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: candidate_peer_id, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); + } else { + // choose one random key to verify + let key_to_verify = + keys_to_verify[OsRng.gen_range(0..keys_to_verify.len())].clone(); + + if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: candidate_peer_id, + key_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } } break; diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index b44d281eff..204067879a 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -485,12 +485,12 @@ impl Node { } NetworkEvent::ChunkProofVerification { peer_id, - keys_to_verify, + key_to_verify, } => { event_header = "ChunkProofVerification"; let network = self.network().clone(); - debug!("Going to verify chunk {keys_to_verify:?} against peer {peer_id:?}"); + debug!("Going to verify chunk {key_to_verify} against peer {peer_id:?}"); let _handle = spawn(async move { // To avoid the peer is in the process of getting the copy via replication, @@ -498,7 +498,7 @@ impl Node { // Only report the node as bad when ALL the verification attempts failed. let mut attempts = 0; while attempts < MAX_CHUNK_PROOF_VERIFY_ATTEMPTS { - if chunk_proof_verify_peer(&network, peer_id, &keys_to_verify).await { + if chunk_proof_verify_peer(&network, peer_id, &key_to_verify).await { return; } // Replication interval is 22s - 45s. @@ -768,44 +768,36 @@ impl Node { } } -async fn chunk_proof_verify_peer( - network: &Network, - peer_id: PeerId, - keys: &[NetworkAddress], -) -> bool { - for key in keys.iter() { - let check_passed = if let Ok(Some(record)) = - network.get_local_record(&key.to_record_key()).await - { - let nonce = thread_rng().gen::(); - let expected_proof = ChunkProof::new(&record.value, nonce); - debug!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); - - let request = Request::Query(Query::GetChunkExistenceProof { - key: key.clone(), - nonce, - }); - let responses = network - .send_and_get_responses(&[peer_id], &request, true) - .await; - let n_verified = responses - .into_iter() - .filter_map(|(peer, resp)| { - received_valid_chunk_proof(key, &expected_proof, peer, resp) - }) - .count(); - - n_verified >= 1 - } else { - error!( +async fn chunk_proof_verify_peer(network: &Network, peer_id: PeerId, key: &NetworkAddress) -> bool { + let check_passed = if let Ok(Some(record)) = + network.get_local_record(&key.to_record_key()).await + { + let nonce = thread_rng().gen::(); + let expected_proof = ChunkProof::new(&record.value, nonce); + debug!("To verify peer {peer_id:?}, chunk_proof for {key:?} is {expected_proof:?}"); + + let request = Request::Query(Query::GetChunkExistenceProof { + key: key.clone(), + nonce, + }); + let responses = network + .send_and_get_responses(&[peer_id], &request, true) + .await; + let n_verified = responses + .into_iter() + .filter_map(|(peer, resp)| received_valid_chunk_proof(key, &expected_proof, peer, resp)) + .count(); + + n_verified >= 1 + } else { + error!( "To verify peer {peer_id:?} Could not get ChunkProof for {key:?} as we don't have the record locally." 
); - true - }; + true + }; - if !check_passed { - return false; - } + if !check_passed { + return false; } true From 58613604f743449e21a2aca5cb37e25baebfc16d Mon Sep 17 00:00:00 2001 From: qima Date: Mon, 28 Oct 2024 19:07:33 +0800 Subject: [PATCH 107/128] fix(CI): address failed CI --- .github/workflows/memcheck.yml | 9 ++++++--- .github/workflows/merge.yml | 17 +++++++++++------ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index cbfb52d4cc..d16b417fca 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -5,9 +5,9 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run checks should show on master and enable this clear test/passing history merge_group: - branches: [ main, alpha*, beta*, rc* ] + branches: [main, alpha*, beta*, rc*] pull_request: - branches: [ "*" ] + branches: ["*"] env: SAFE_DATA_PATH: /home/runner/.local/share/safe @@ -126,6 +126,9 @@ jobs: - name: Assert we've reloaded some chunks run: rg "Existing record found" $RESTART_TEST_NODE_DATA_PATH + - name: Wait at least 1min for replication to happen # it is throttled to once/30s. + run: sleep 60 + - name: Verify data replication using rg shell: bash timeout-minutes: 1 @@ -232,7 +235,7 @@ jobs: # Logging of handling time is on Trace level, # meanwhile the local_network startup tool sets the logging level on Debug. - # + # # - name: Check node swarm_driver handling statistics # shell: bash # # With the latest improvements, swarm_driver will be in high chance diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 720a2f7e25..eb880a43f9 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -811,19 +811,24 @@ jobs: # Otherwise kad will remove a `dropped out node` directly from RT. # So, the detection of the removal explicity will now have much less chance, # due to the removal of connection_issue tracking. + # + # With the further reduction of replication frequency, + # it now becomes harder to detect a `dropped out node` as a `failed to replicate` node. + # Hence now remove the assertion check and replace with a print out only. run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) echo "Restart $restart_count nodes" + if ! 
rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats + then + echo "No peer removal count found" + exit 0 + fi peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ rg "(\d+) matches" | rg "\d+" -o) echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ -z "$peer_removed" ]; then - echo "No peer removal count found" - exit 1 - fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # Only error out after uploading the logs - name: Don't log raw data From 7a41d8b5226fdbd9e633fd3bf46ada17164e3fe1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 28 Oct 2024 19:05:28 +0530 Subject: [PATCH 108/128] feat(metrics): cap node wallet balance to i64::MAX --- sn_node/src/put_validation.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 5a5dac140b..224fc3bcb9 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -640,9 +640,13 @@ impl Node { #[cfg(feature = "open-metrics")] if let Some(metrics_recorder) = self.metrics_recorder() { + // FIXME: We would reach the MAX if the storecost is scaled up. + let current_value = metrics_recorder.current_reward_wallet_balance.get(); + let new_value = + current_value.saturating_add(storecost.as_atto().try_into().unwrap_or(i64::MAX)); let _ = metrics_recorder .current_reward_wallet_balance - .inc_by(storecost.as_atto().try_into().unwrap_or(i64::MAX)); // TODO maybe metrics should be in u256 too? + .set(new_value); } self.events_channel() .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone())); From 26bce27e2dbe88f4d2e1e3035a382128615877a9 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 28 Oct 2024 15:31:40 +0100 Subject: [PATCH 109/128] fix(autonomi): use FuturesUnordered for WASM Instead of JoinSet from `tokio` which uses the Tokio runtime. --- autonomi/src/client/data.rs | 18 ++++++------------ autonomi/src/client/data_private.rs | 13 +++++-------- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 869022cd37..164c85b6b0 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use bytes::Bytes; +use futures::StreamExt as _; use libp2p::kad::Quorum; -use tokio::task::{JoinError, JoinSet}; use std::collections::HashSet; use xor_name::XorName; @@ -47,8 +47,6 @@ pub enum PutError { VaultBadOwner, #[error("Payment unexpectedly invalid for {0:?}")] PaymentUnexpectedlyInvalid(NetworkAddress), - #[error("Could not simultaneously upload chunks: {0:?}")] - JoinError(tokio::task::JoinError), } /// Errors that can occur during the pay operation. @@ -80,8 +78,6 @@ pub enum GetError { /// Errors that can occur during the cost calculation. 
#[derive(Debug, thiserror::Error)] pub enum CostError { - #[error("Could not simultaneously fetch store costs: {0:?}")] - JoinError(JoinError), #[error("Failed to self-encrypt data.")] SelfEncryption(#[from] crate::self_encryption::Error), #[error("Could not get store quote for: {0:?} after several retries")] @@ -135,13 +131,14 @@ impl Client { // Upload all the chunks in parallel including the data map chunk debug!("Uploading {} chunks", chunks.len()); - let mut tasks = JoinSet::new(); + let mut tasks = futures::stream::FuturesUnordered::new(); + for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) { let self_clone = self.clone(); let address = *chunk.address(); if let Some(proof) = payment_proofs.get(chunk.name()) { let proof_clone = proof.clone(); - tasks.spawn(async move { + tasks.push(async move { self_clone .chunk_upload_with_payment(chunk, proof_clone) .await @@ -151,11 +148,8 @@ impl Client { debug!("Chunk at {address:?} was already paid for so skipping"); } } - while let Some(result) = tasks.join_next().await { - result - .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) - .map_err(PutError::JoinError)? - .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + while let Some(result) = tasks.next().await { + result.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; record_count += 1; } diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs index b6d0bfa8a3..35eb3e30d2 100644 --- a/autonomi/src/client/data_private.rs +++ b/autonomi/src/client/data_private.rs @@ -9,10 +9,10 @@ use std::hash::{DefaultHasher, Hash, Hasher}; use bytes::Bytes; +use futures::StreamExt as _; use serde::{Deserialize, Serialize}; use sn_evm::{Amount, EvmWallet}; use sn_protocol::storage::Chunk; -use tokio::task::JoinSet; use super::data::{GetError, PutError}; use crate::client::{ClientEvent, UploadSummary}; @@ -80,13 +80,13 @@ impl Client { // Upload the chunks with the payments let mut record_count = 0; debug!("Uploading {} chunks", chunks.len()); - let mut tasks = JoinSet::new(); + let mut tasks = futures::stream::FuturesUnordered::new(); for chunk in chunks { let self_clone = self.clone(); let address = *chunk.address(); if let Some(proof) = payment_proofs.get(chunk.name()) { let proof_clone = proof.clone(); - tasks.spawn(async move { + tasks.push(async move { self_clone .chunk_upload_with_payment(chunk, proof_clone) .await @@ -96,11 +96,8 @@ impl Client { debug!("Chunk at {address:?} was already paid for so skipping"); } } - while let Some(result) = tasks.join_next().await { - result - .inspect_err(|err| error!("Join error uploading chunk: {err:?}")) - .map_err(PutError::JoinError)? 
- .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; + while let Some(result) = tasks.next().await { + result.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?; record_count += 1; } From c8d39a0c7047d3423eb4f08a90bb6fa970b61467 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 28 Oct 2024 15:43:29 +0100 Subject: [PATCH 110/128] fix(launchpad): changing copy on terms and conditions --- node-launchpad/src/components/popup/rewards_address.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/components/popup/rewards_address.rs b/node-launchpad/src/components/popup/rewards_address.rs index 6f8eda7db0..8ec3741034 100644 --- a/node-launchpad/src/components/popup/rewards_address.rs +++ b/node-launchpad/src/components/popup/rewards_address.rs @@ -329,7 +329,7 @@ impl Component for RewardsAddress { .split(layer_one[1]); let text = Paragraph::new(vec![ - Line::from(Span::styled("Add your wallet and you can earn a slice of millions of tokens created at the genesis of the Autonomi Network when through running nodes.",Style::default())), + Line::from(Span::styled("Add your wallet to store your node earnings, and we'll pay you rewards to the same wallet after the Network's Token Generation Event.",Style::default())), Line::from(Span::styled("\n\n",Style::default())), Line::from(Span::styled("By continuing you agree to the Terms and Conditions found here:",Style::default())), Line::from(Span::styled("\n\n",Style::default())), From 5d96ef69d71521a3785736ce3e3d7fc5305e3b03 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 28 Oct 2024 16:35:58 +0100 Subject: [PATCH 111/128] docs(autonomi): add WASM docs --- autonomi/WASM_docs.md | 156 ++---------------------------------- autonomi/src/client/wasm.rs | 104 ++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 148 deletions(-) diff --git a/autonomi/WASM_docs.md b/autonomi/WASM_docs.md index 995809b8bd..6cf080113f 100644 --- a/autonomi/WASM_docs.md +++ b/autonomi/WASM_docs.md @@ -1,160 +1,24 @@ -## JavaScript Autonomi API Documentation +# JavaScript Autonomi API Documentation Note that this is a first version and will be subject to change. -### **Client** +The entry point for connecting to the network is {@link Client.connect}. -The `Client` object allows interaction with the network to store and retrieve data. Below are the available methods for the `Client` class. +This API is a wrapper around the Rust API, found here: https://docs.rs/autonomi/latest/autonomi. The Rust API contains more detailed documentation on concepts and some types. -#### **Constructor** +## Addresses -```javascript -let client = await new Client([multiaddress]); -``` - -- **multiaddress** (Array of Strings): A list of network addresses for the client to connect to. - -Example: -```javascript -let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); -``` - -#### **Methods** - -##### **put(data, wallet)** - -Uploads a piece of encrypted data to the network. - -```javascript -let result = await client.put(data, wallet); -``` - -- **data** (Uint8Array): The data to be stored. -- **wallet** (Wallet): The wallet used to pay for the storage. - -Returns: -- **result** (XorName): The XOR address of the stored data. - -Example: -```javascript -let wallet = getFundedWallet(); -let data = new Uint8Array([1, 2, 3]); -let result = await client.put(data, wallet); -``` - -##### **get(data_map_addr)** - -Fetches encrypted data from the network using its XOR address. 
-
-```javascript
-let data = await client.get(data_map_addr);
-```
-
-- **data_map_addr** (XorName): The XOR address of the data to fetch.
-
-Returns:
-- **data** (Uint8Array): The fetched data.
-
-Example:
-```javascript
-let data = await client.get(result);
-```
-
-##### **cost(data)**
-
-Gets the cost of storing the provided data on the network.
-
-```javascript
-let cost = await client.cost(data);
-```
-
-- **data** (Uint8Array): The data whose storage cost you want to calculate.
-
-Returns:
-- **cost** (AttoTokens): The calculated cost for storing the data.
-
-Example:
-```javascript
-let cost = await client.cost(new Uint8Array([1, 2, 3]));
-```
-
----
-
-### **Wallet**
-
-The `Wallet` object represents an Ethereum wallet used for data payments.
-
-#### **Methods**
-
-##### **new_from_private_key(network, private_key)**
-
-Creates a new wallet using the given private key.
-
-```javascript
-let wallet = Wallet.new_from_private_key(network, private_key);
-```
+For addresses (chunk, data, archives, etc.) we use hex-encoded strings containing a 256-bit XOR address. For example: `abcdef0123456789000000000000000000000000000000000000000000000000`.
 
-- **network** (EvmNetwork): The network to which the wallet connects.
-- **private_key** (String): The private key of the wallet.
-
-Returns:
-- **wallet** (Wallet): The created wallet.
-
-Example:
-```javascript
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
-```
-
-##### **address()**
-
-Gets the wallet’s address.
+## Example
 
 ```javascript
-let address = wallet.address();
-```
-
-Returns:
-- **address** (Address): The wallet's address.
+import init, { Client, Wallet, getEvmNetwork } from 'autonomi';
 
-Example:
-```javascript
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
-let address = wallet.address();
-```
-
----
-
-### **EvmNetwork**
-
-The `EvmNetwork` object represents the blockchain network.
-
-#### **Methods**
-
-##### **default()**
-
-Connects to the default network.
-
-```javascript
-let network = EvmNetwork.default();
-```
-
-Returns:
-- **network** (EvmNetwork): The default network.
-
-Example:
-```javascript
-let network = EvmNetwork.default();
-```
-
----
-
-### Example Usage:
-
-```javascript
 let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
 console.log("connected");
 
-let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here");
+let wallet = Wallet.new_from_private_key(getEvmNetwork(), "your_private_key_here");
 console.log("wallet retrieved");
 
 let data = new Uint8Array([1, 2, 3]);
@@ -164,7 +28,3 @@ console.log("Data stored at:", result);
 let fetchedData = await client.get(result);
 console.log("Data retrieved:", fetchedData);
 ```
-
----
-
-This documentation covers the basic usage of `Client`, `Wallet`, and `EvmNetwork` types in the JavaScript API.
\ No newline at end of file
diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs
index 0d395c0d4f..a102626ea3 100644
--- a/autonomi/src/client/wasm.rs
+++ b/autonomi/src/client/wasm.rs
@@ -6,6 +6,22 @@ use super::address::{addr_to_str, str_to_addr};
 #[cfg(feature = "vault")]
 use super::vault::UserData;
 
+/// The `Client` object allows interaction with the network to store and retrieve data.
+///
+/// To connect to the network, see {@link Client.connect}.
+///
+/// # Example
+///
+/// ```js
+/// let client = await Client.connect(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
+/// const dataAddr = await client.dataPut(new Uint8Array([0, 1, 2, 3]), wallet);
+///
+/// const archive = new Archive();
+/// archive.addNewFile("foo", dataAddr);
+///
+/// const archiveAddr = await client.archivePut(archive, wallet);
+/// const archiveFetched = await client.archiveGet(archiveAddr);
+/// ```
 #[wasm_bindgen(js_name = Client)]
 pub struct JsClient(super::Client);
 
@@ -21,6 +37,13 @@ impl AttoTokens {
 
 #[wasm_bindgen(js_class = Client)]
 impl JsClient {
+    /// Connect to the network via the given peers.
+    ///
+    /// # Example
+    ///
+    /// ```js
+    /// let client = await Client.connect(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]);
+    /// ```
     #[wasm_bindgen]
     pub async fn connect(peers: Vec<String>) -> Result<JsClient, JsError> {
         let peers = peers
             .into_iter()
@@ -33,11 +56,17 @@ impl JsClient {
         Ok(JsClient(client))
     }
 
+    /// Upload a chunk to the network.
+    ///
+    /// Returns the hex encoded address of the chunk.
+    ///
+    /// This is not yet implemented.
     #[wasm_bindgen(js_name = chunkPut)]
     pub async fn chunk_put(&self, _data: Vec<u8>, _wallet: &JsWallet) -> Result<String, JsError> {
         async { unimplemented!() }.await
     }
 
+    /// Fetch the chunk from the network.
     #[wasm_bindgen(js_name = chunkGet)]
     pub async fn chunk_get(&self, addr: String) -> Result<Vec<u8>, JsError> {
         let addr = str_to_addr(&addr)?;
@@ -46,6 +75,9 @@ impl JsClient {
         Ok(chunk.value().to_vec())
     }
 
+    /// Upload data to the network.
+    ///
+    /// Returns the hex encoded address of the data.
     #[wasm_bindgen(js_name = dataPut)]
     pub async fn data_put(&self, data: Vec<u8>, wallet: &JsWallet) -> Result<String, JsError> {
         let data = crate::Bytes::from(data);
@@ -54,6 +86,7 @@ impl JsClient {
         Ok(addr_to_str(xorname))
     }
 
+    /// Fetch the data from the network.
     #[wasm_bindgen(js_name = dataGet)]
     pub async fn data_get(&self, addr: String) -> Result<Vec<u8>, JsError> {
         let addr = str_to_addr(&addr)?;
@@ -62,6 +95,7 @@ impl JsClient {
         Ok(data.to_vec())
     }
 
+    /// Get the cost of uploading data to the network.
     #[wasm_bindgen(js_name = dataCost)]
     pub async fn data_cost(&self, data: Vec<u8>) -> Result<AttoTokens, JsError> {
         let data = crate::Bytes::from(data);
@@ -76,16 +110,19 @@ mod archive {
     use crate::client::{address::str_to_addr, archive::Archive};
     use std::path::PathBuf;
 
+    /// Structure mapping paths to data addresses.
     #[wasm_bindgen(js_name = Archive)]
     pub struct JsArchive(Archive);
 
     #[wasm_bindgen(js_class = Archive)]
     impl JsArchive {
+        /// Create a new archive.
         #[wasm_bindgen(constructor)]
         pub fn new() -> Self {
             Self(Archive::new())
         }
 
+        /// Add a new file to the archive.
         #[wasm_bindgen(js_name = addNewFile)]
         pub fn add_new_file(&mut self, path: String, data_addr: String) -> Result<(), JsError> {
             let path = PathBuf::from(path);
@@ -113,6 +150,7 @@ mod archive {
 
     #[wasm_bindgen(js_class = Client)]
     impl JsClient {
+        /// Fetch an archive from the network.
         #[wasm_bindgen(js_name = archiveGet)]
         pub async fn archive_get(&self, addr: String) -> Result<JsArchive, JsError> {
             let addr = str_to_addr(&addr)?;
@@ -122,6 +160,9 @@ mod archive {
             Ok(archive)
         }
 
+        /// Upload an archive to the network.
+        ///
+        /// Returns the hex encoded address of the archive.
         #[wasm_bindgen(js_name = archivePut)]
         pub async fn archive_put(
             &self,
@@ -139,16 +180,25 @@ mod vault {
     use super::*;
 
+    /// Structure to keep track of uploaded archives, registers and other data.
     #[wasm_bindgen(js_name = UserData)]
     pub struct JsUserData(UserData);
 
     #[wasm_bindgen(js_class = UserData)]
     impl JsUserData {
+        /// Create a new user data structure.
        #[wasm_bindgen(constructor)]
         pub fn new() -> Self {
             Self(UserData::new())
         }
 
+        /// Store an archive address in the user data with an optional name.
+        ///
+        /// # Example
+        ///
+        /// ```js
+        /// userData.addFileArchive(archiveAddr, "foo");
+        /// ```
         #[wasm_bindgen(js_name = addFileArchive)]
         pub fn add_file_archive(
             &mut self,
@@ -189,6 +239,14 @@ mod vault {
 
     #[wasm_bindgen(js_class = Client)]
     impl JsClient {
+        /// Fetch the user data from the vault.
+        ///
+        /// # Example
+        ///
+        /// ```js
+        /// const secretKey = genSecretKey();
+        /// const userData = await client.getUserDataFromVault(secretKey);
+        /// ```
         #[wasm_bindgen(js_name = getUserDataFromVault)]
         pub async fn get_user_data_from_vault(
             &self,
@@ -199,6 +257,14 @@ mod vault {
             Ok(JsUserData(user_data))
         }
 
+        /// Put the user data to the vault.
+        ///
+        /// # Example
+        ///
+        /// ```js
+        /// const secretKey = genSecretKey();
+        /// await client.putUserDataToVault(userData, wallet, secretKey);
+        /// ```
         #[wasm_bindgen(js_name = putUserDataToVault)]
         pub async fn put_user_data_to_vault(
             &self,
@@ -232,6 +298,13 @@ mod external_signer {
 
     #[wasm_bindgen(js_class = Client)]
     impl JsClient {
+        /// Get quotes for the given data.
+        ///
+        /// # Example
+        ///
+        /// ```js
+        /// const [quotes, quotePayments, free_chunks] = await client.getQuotes(data);
+        /// ```
         #[wasm_bindgen(js_name = getQuotes)]
         pub async fn get_quotes_for_data(&self, data: Vec<u8>) -> Result<JsValue, JsError> {
             let data = crate::Bytes::from(data);
@@ -240,6 +313,14 @@ mod external_signer {
             Ok(js_value)
         }
 
+        /// Upload data with a proof of payment.
+        ///
+        /// # Example
+        ///
+        /// ```js
+        /// const proof = getPaymentProofFromQuotesAndPayments(quotes, payments);
+        /// const addr = await client.dataPutWithProof(data, proof);
+        /// ```
         #[wasm_bindgen(js_name = dataPutWithProof)]
         pub async fn data_put_with_proof_of_payment(
             &self,
@@ -253,6 +334,14 @@ mod external_signer {
         }
     }
 
+    /// Get the calldata for paying for quotes.
+    ///
+    /// # Example
+    ///
+    /// ```js
+    /// const [quotes, quotePayments, free_chunks] = await client.getQuotes(data);
+    /// const callData = getPayForQuotesCalldata(evmNetwork, quotePayments);
+    /// ```
     #[wasm_bindgen(js_name = getPayForQuotesCalldata)]
     pub fn get_pay_for_quotes_calldata(
         network: JsValue,
@@ -265,6 +354,7 @@ mod external_signer {
         Ok(js_value)
     }
 
+    /// Form the calldata to approve token spending.
     #[wasm_bindgen(js_name = getApproveToSpendTokensCalldata)]
     pub fn get_approve_to_spend_tokens_calldata(
         network: JsValue,
@@ -279,6 +369,7 @@ mod external_signer {
         Ok(js_value)
     }
 
+    /// Generate a payment proof.
     #[wasm_bindgen(js_name = getPaymentProofFromQuotesAndPayments)]
     pub fn get_payment_proof_from_quotes_and_payments(
         quotes: JsValue,
@@ -295,6 +386,13 @@ mod external_signer {
 #[wasm_bindgen(js_name = SecretKey)]
 pub struct SecretKeyJs(bls::SecretKey);
 
+/// # Example
+///
+/// ```js
+/// const secretKey = genSecretKey();
+/// await client.putUserDataToVault(userData, wallet, secretKey);
+/// const userDataFetched = await client.getUserDataFromVault(secretKey);
+/// ```
 #[wasm_bindgen(js_name = genSecretKey)]
 pub fn gen_secret_key() -> SecretKeyJs {
     let secret_key = bls::SecretKey::random();
@@ -337,6 +435,12 @@ pub fn funded_wallet() -> JsWallet {
 ///
 /// A level could be passed like `trace` or `warn`. Or set for a specific module/crate
 /// with `sn_networking=trace,autonomi=info`.
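+///
+/// Note: the directive is assumed to follow the `tracing` crate's `EnvFilter` syntax
+/// (comma-separated `target=level` pairs); this is an assumption based on the
+/// `tracing_subscriber` setup below rather than a documented guarantee.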
+///
+/// # Example
+///
+/// ```js
+/// logInit("sn_networking=warn,autonomi=trace");
+/// ```
 #[wasm_bindgen(js_name = logInit)]
 pub fn log_init(directive: String) {
     use tracing_subscriber::prelude::*;

From 8ccc869bbae32b5dcfd29e887a05bb452db12ddb Mon Sep 17 00:00:00 2001
From: qima
Date: Tue, 29 Oct 2024 00:15:37 +0800
Subject: [PATCH 112/128] chore(network): reduce outdated connection pruning
 frequency

---
 .github/workflows/merge.yml      | 2 +-
 sn_networking/src/driver.rs      | 3 +++
 sn_networking/src/event/swarm.rs | 6 ++++++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index eb880a43f9..b56b2f83f1 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -670,7 +670,7 @@ jobs:
         timeout-minutes: 1
         run: |
           peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \
-            rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; }
+            rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 0; }
           if [ -z "$peer_removed" ]; then
             echo "No peer removal count found"
             exit 1
diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index 30386e14d8..e70cc6c68d 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -721,6 +721,7 @@ impl NetworkBuilder {
             range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT),
             first_contact_made: false,
             last_replication: None,
+            last_connection_pruning_time: Instant::now(),
         };
 
         let network = Network::new(
@@ -828,6 +829,8 @@ pub struct SwarmDriver {
     pub(crate) range_distances: VecDeque<Distance>,
     // have we found our initial peer
     pub(crate) first_contact_made: bool,
+    /// When the last pruning of outdated connections was undertaken.
+    pub(crate) last_connection_pruning_time: Instant,
 }
 
 impl SwarmDriver {
diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs
index 90a3939f47..c4de69665d 100644
--- a/sn_networking/src/event/swarm.rs
+++ b/sn_networking/src/event/swarm.rs
@@ -605,6 +605,12 @@ impl SwarmDriver {
     // Remove outdated connection to a peer if it is not in the RT.
     // Optionally force remove all the connections for a provided peer.
     fn remove_outdated_connections(&mut self) {
+        // To avoid this being called too frequently, only carry out pruning at intervals
+        // (skip if the last pruning ran less than 30 seconds ago).
+        if Instant::now() < self.last_connection_pruning_time + Duration::from_secs(30) {
+            return;
+        }
+        self.last_connection_pruning_time = Instant::now();
+
         let mut removed_conns = 0;
         self.live_connected_peers.retain(|connection_id, (peer_id, timeout_time)| {

From 905365e7c656ea6979856f4a49aefb78c6a9ef64 Mon Sep 17 00:00:00 2001
From: Warm Beer
Date: Mon, 28 Oct 2024 18:09:39 +0100
Subject: [PATCH 113/128] chore: updated arbitrum sepolia contracts

---
 evmlib/src/lib.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs
index cc9163688c..e0df96d466 100644
--- a/evmlib/src/lib.rs
+++ b/evmlib/src/lib.rs
@@ -45,14 +45,14 @@ const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address =
     address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C");
 
 const ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS: Address =
-    address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C");
+    address!("BE1802c27C324a28aeBcd7eeC7D734246C807194");
 
 // Should be updated when the smart contract changes!
const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address =
     address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe");
 
 const ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS: Address =
-    address!("e6D6bB5Fa796baA8c1ADc439Ac0fd66fd2A1858b");
+    address!("Dd56b03Dae2Ab8594D80269EC4518D13F1A110BD");
 
 #[serde_as]
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]

From 586e94bc94cb07851b99d6d1b614eff85b3ab075 Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Mon, 28 Oct 2024 19:06:16 +0100
Subject: [PATCH 114/128] fix(launchpad): using sn_node_current_reward_wallet_balance
 as attos

---
 node-launchpad/src/components/status.rs | 13 ++++++++-----
 node-launchpad/src/node_stats.rs        | 20 +++++++++++++++++---
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 3feb403485..1e2d24ded3 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -207,7 +207,7 @@ impl Status<'_> {
                 .iter()
                 .find(|s| s.service_name == node_item.service_name)
             {
-                item.attos = stats.forwarded_rewards;
+                item.attos = stats.rewards_wallet_balance;
                 item.memory = stats.memory_usage_mb;
                 item.mbps = format!(
                     "↓{:06.2} ↑{:06.2}",
@@ -685,9 +685,12 @@ impl Component for Status<'_> {
 
         let total_attos_earned_and_wallet_row = Row::new(vec![
             Cell::new("Attos Earned".to_string()).fg(VIVID_SKY_BLUE),
-            Cell::new(self.node_stats.total_forwarded_rewards.to_string())
-                .fg(VIVID_SKY_BLUE)
-                .bold(),
+            Cell::new(format!(
+                "{:?}",
+                self.node_stats.total_rewards_wallet_balance
+            ))
+            .fg(VIVID_SKY_BLUE)
+            .bold(),
             Cell::new(Line::from(wallet_not_set).alignment(Alignment::Right)),
         ]);
 
@@ -1017,7 +1020,7 @@ impl fmt::Display for NodeStatus {
 pub struct NodeItem<'a> {
     name: String,
     version: String,
-    attos: u64,
+    attos: usize,
     memory: usize,
     mbps: String,
     records: usize,
diff --git a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs
index a68d0d1404..339ab24b36 100644
--- a/node-launchpad/src/node_stats.rs
+++ b/node-launchpad/src/node_stats.rs
@@ -20,7 +20,8 @@ use crate::action::{Action, StatusActions};
 #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct IndividualNodeStats {
     pub service_name: String,
-    pub forwarded_rewards: u64,
+    pub forwarded_rewards: usize,
+    pub rewards_wallet_balance: usize,
     pub memory_usage_mb: usize,
     pub bandwidth_inbound: usize,
     pub bandwidth_outbound: usize,
@@ -33,7 +34,8 @@ pub struct IndividualNodeStats {
 
 #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct NodeStats {
-    pub total_forwarded_rewards: u64,
+    pub total_forwarded_rewards: usize,
+    pub total_rewards_wallet_balance: usize,
     pub total_memory_usage_mb: usize,
     pub individual_stats: Vec<IndividualNodeStats>,
 }
 
 impl NodeStats {
     fn merge(&mut self, other: &IndividualNodeStats) {
         self.total_forwarded_rewards += other.forwarded_rewards;
+        self.total_rewards_wallet_balance += other.rewards_wallet_balance;
         self.total_memory_usage_mb += other.memory_usage_mb;
         self.individual_stats.push(other.clone()); // Store individual stats
     }
@@ -135,6 +138,7 @@ impl NodeStats {
                 let individual_stats = IndividualNodeStats {
                     service_name: service_name.clone(),
                     forwarded_rewards: stats.forwarded_rewards,
+                    rewards_wallet_balance: stats.rewards_wallet_balance,
                     memory_usage_mb: stats.memory_usage_mb,
                     bandwidth_inbound: stats.bandwidth_inbound,
                     bandwidth_outbound: stats.bandwidth_outbound,
@@ -181,7 +185,17 @@
prometheus_parse::Value::Counter(val) | prometheus_parse::Value::Gauge(val) | prometheus_parse::Value::Untyped(val) => { - stats.forwarded_rewards = val as u64; + stats.forwarded_rewards = val as usize; + } + _ => {} + } + } else if sample.metric == "sn_node_current_reward_wallet_balance" { + // Attos + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.rewards_wallet_balance = val as usize; } _ => {} } From 98021d9df7252a45827186777f9e1d33e6ac1553 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 28 Oct 2024 22:20:46 +0000 Subject: [PATCH 115/128] chore(release): release candidate 2024.10.4.2 ================== Crate Versions ================== autonomi: 0.2.2-rc.2 autonomi-cli: 0.1.3-rc.2 evmlib: 0.1.2-rc.2 evm_testnet: 0.1.2-rc.2 sn_build_info: 0.1.17-rc.2 sn_evm: 0.1.2-rc.2 sn_logging: 0.2.38-rc.2 sn_metrics: 0.1.18-rc.2 nat-detection: 0.2.9-rc.2 sn_networking: 0.19.1-rc.2 sn_node: 0.112.2-rc.2 node-launchpad: 0.4.2-rc.2 sn_node_manager: 0.11.1-rc.2 sn_node_rpc_client: 0.6.33-rc.2 sn_peers_acquisition: 0.5.5-rc.2 sn_protocol: 0.17.13-rc.2 sn_registers: 0.4.1-rc.2 sn_service_management: 0.4.1-rc.2 sn_transfers: 0.20.1-rc.2 test_utils: 0.4.9-rc.2 token_supplies: 0.1.56-rc.2 =================== Binary Versions =================== nat-detection: 0.2.9-rc.2 node-launchpad: 0.4.2-rc.2 autonomi: 0.1.3-rc.2 safenode: 0.112.2-rc.2 safenode-manager: 0.11.1-rc.2 safenode_rpc_client: 0.6.33-rc.2 safenodemand: 0.11.1-rc.2 --- Cargo.lock | 42 +++++++++++++++---------------- autonomi-cli/Cargo.toml | 12 ++++----- autonomi/Cargo.toml | 20 +++++++-------- evm_testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++--- node-launchpad/Cargo.toml | 14 +++++------ release-cycle-info | 2 +- sn_build_info/Cargo.toml | 2 +- sn_build_info/src/release_info.rs | 2 +- sn_evm/Cargo.toml | 4 +-- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 12 ++++----- sn_node/Cargo.toml | 28 ++++++++++----------- sn_node_manager/Cargo.toml | 16 ++++++------ sn_node_rpc_client/Cargo.toml | 16 ++++++------ sn_peers_acquisition/Cargo.toml | 4 +-- sn_protocol/Cargo.toml | 10 ++++---- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++--- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 6 ++--- token_supplies/Cargo.toml | 2 +- 24 files changed, 112 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9720dd2245..a30a1efbb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1088,7 +1088,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.2.2-rc.1" +version = "0.2.2-rc.2" dependencies = [ "alloy", "assert_matches", @@ -1139,7 +1139,7 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.3-rc.1" +version = "0.1.3-rc.2" dependencies = [ "autonomi", "clap", @@ -2792,7 +2792,7 @@ dependencies = [ [[package]] name = "evm_testnet" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" dependencies = [ "clap", "dirs-next", @@ -2803,7 +2803,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" dependencies = [ "alloy", "dirs-next", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.9-rc.1" +version = "0.2.9-rc.2" dependencies = [ "clap", "clap-verbosity-flag", @@ -5730,7 +5730,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.4.2-rc.1" 
+version = "0.4.2-rc.2" dependencies = [ "atty", "better-panic", @@ -8095,7 +8095,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.11.1-rc.1" +version = "0.11.1-rc.2" dependencies = [ "assert_cmd", "assert_fs", @@ -8171,7 +8171,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.17-rc.1" +version = "0.1.17-rc.2" dependencies = [ "chrono", "tracing", @@ -8213,7 +8213,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" dependencies = [ "custom_debug", "evmlib", @@ -8236,7 +8236,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.38-rc.1" +version = "0.2.38-rc.2" dependencies = [ "chrono", "color-eyre", @@ -8261,7 +8261,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.18-rc.1" +version = "0.1.18-rc.2" dependencies = [ "clap", "color-eyre", @@ -8275,7 +8275,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.19.1-rc.1" +version = "0.19.1-rc.2" dependencies = [ "aes-gcm-siv", "async-channel", @@ -8321,7 +8321,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.112.2-rc.1" +version = "0.112.2-rc.2" dependencies = [ "assert_fs", "async-trait", @@ -8378,7 +8378,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.33-rc.1" +version = "0.6.33-rc.2" dependencies = [ "assert_fs", "async-trait", @@ -8405,7 +8405,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.5-rc.1" +version = "0.5.5-rc.2" dependencies = [ "clap", "lazy_static", @@ -8421,7 +8421,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.13-rc.1" +version = "0.17.13-rc.2" dependencies = [ "blsttc", "bytes", @@ -8451,7 +8451,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.1-rc.1" +version = "0.4.1-rc.2" dependencies = [ "blsttc", "crdts", @@ -8468,7 +8468,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.1-rc.1" +version = "0.4.1-rc.2" dependencies = [ "async-trait", "dirs-next", @@ -8494,7 +8494,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.1-rc.1" +version = "0.20.1-rc.2" dependencies = [ "assert_fs", "blsttc", @@ -8827,7 +8827,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.9-rc.1" +version = "0.4.9-rc.2" dependencies = [ "bytes", "color-eyre", @@ -8971,7 +8971,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.56-rc.1" +version = "0.1.56-rc.2" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 0fb8b720f3..0ccab10317 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "autonomi-cli" description = "Autonomi CLI" license = "GPL-3.0" -version = "0.1.3-rc.1" +version = "0.1.3-rc.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -24,7 +24,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ "data", "fs", "vault", @@ -45,15 +45,15 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } 
-sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } walkdir = "2.5.0" serde_json = "1.0.132" serde = "1.0.210" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 35aacea5f9..b7237672e7 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.2.2-rc.1" +version = "0.2.2-rc.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -40,11 +40,11 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } -sn_protocol = { version = "0.17.13-rc.1", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_protocol = { version = "0.17.13-rc.2", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -63,9 +63,9 @@ alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwe assert_matches = "1.5.0" eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1", features = ["test-utils"] } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2", features = ["test-utils"] } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
@@ -76,7 +76,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.2-rc.1", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.2-rc.2", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index e493b1ad79..e2c79cc40b 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.2-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 77079acd19..d58252a6d7 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index f0f1c5867b..83a05f15a0 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.9-rc.1" +version = "0.2.9-rc.2" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 21c1dba24f..680d3618d0 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.4.2-rc.1" +version = "0.4.2-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } -sn-node-manager = { version = "0.11.1-rc.1", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.5-rc.1", path = "../sn_peers_acquisition" 
} -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn-node-manager = { version = "0.11.1-rc.2", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.5-rc.2", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.1-rc.1", path = "../sn_service_management" } +sn_service_management = { version = "0.4.1-rc.2", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index 96648b9415..9b8978040f 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -15,4 +15,4 @@ release-year: 2024 release-month: 10 release-cycle: 4 -release-cycle-counter: 1 +release-cycle-counter: 2 diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 101b48ae32..b91a71931a 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.17-rc.1" +version = "0.1.17-rc.2" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/sn_build_info/src/release_info.rs b/sn_build_info/src/release_info.rs index 8872025ec1..e9c752684e 100644 --- a/sn_build_info/src/release_info.rs +++ b/sn_build_info/src/release_info.rs @@ -1,4 +1,4 @@ pub const RELEASE_YEAR: &str = "2024"; pub const RELEASE_MONTH: &str = "10"; pub const RELEASE_CYCLE: &str = "4"; -pub const RELEASE_CYCLE_COUNTER: &str = "1"; +pub const RELEASE_CYCLE_COUNTER: &str = "2"; diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 98e05e2973..27859ae7df 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.1" +version = "0.1.2-rc.2" [features] test-utils = [] @@ -17,7 +17,7 @@ external-signer = ["evmlib/external-signer"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.2-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index b214d63c1b..abddb1cd42 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.38-rc.1" +version = "0.2.38-rc.2" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index 8c166b1228..e13285aed2 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.18-rc.1" +version = "0.1.18-rc.2" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 49825bab92..ecd65bf9bc 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.1-rc.1" +version = 
"0.19.1-rc.2" [features] default = [] @@ -54,11 +54,11 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 56281251a8..cd248b6452 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.112.2-rc.1" +version = "0.112.2-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,15 +52,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1.2-rc.1" } -autonomi = { path = "../autonomi", version = "0.2.2-rc.1", features = ["registers"] } +evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } +autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features = [ +sn_protocol = { 
path = "../sn_protocol", version = "0.17.13-rc.2", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index b76b2111bb..6b7879f22d 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.11.1-rc.1" +version = "0.11.1-rc.2" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 9c91a08764..c947bb632b 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.33-rc.1" +version = "0.6.33-rc.2" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } -sn_node = { path = "../sn_node", version = "0.112.2-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_node = { path = "../sn_node", version = "0.112.2-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } +sn_transfers = { path = "../sn_transfers", version = 
"0.20.1-rc.2" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 7670798cc5..0349565a03 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.5-rc.1" +version = "0.5.5-rc.2" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 82d8827b02..7666240409 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.13-rc.1" +version = "0.17.13-rc.2" [features] default = [] @@ -28,10 +28,10 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index 3446345669..4b9dbf8930 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.1-rc.1" +version = "0.4.1-rc.2" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 66ca3a2e26..e3f3886f0b 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.1-rc.1" +version = "0.4.1-rc.2" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.1", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } sysinfo = 
"0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index ded837cef0..f76853cbc8 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.20.1-rc.1" +version = "0.20.1-rc.2" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 94dadc7e4a..5472b3033c 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.9-rc.1" +version = "0.4.9-rc.2" [features] local = ["sn_peers_acquisition/local"] @@ -16,9 +16,9 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.2-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index 81506b505e..b076a9a97f 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.56-rc.1" +version = "0.1.56-rc.2" [dependencies] From 62b127545677328e6e3da2a92897fc9365fe08bf Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 28 Oct 2024 16:57:54 +0900 Subject: [PATCH 116/128] feat: improved fs uploads performance --- autonomi/src/client/fs.rs | 51 +++++++++++++++++++++++++------ autonomi/src/client/fs_private.rs | 51 +++++++++++++++++++++++++------ autonomi/src/client/utils.rs | 35 +++++++++++++++++++-- 3 files changed, 115 insertions(+), 22 deletions(-) diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index d7f243df68..8cb177a6d5 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -8,15 +8,19 @@ use crate::client::archive::Metadata; use crate::client::data::CostError; +use crate::client::utils::process_tasks_with_max_concurrency; use crate::client::Client; use bytes::Bytes; use sn_evm::EvmWallet; use sn_networking::target_arch::{Duration, SystemTime}; use std::path::PathBuf; +use tokio::task::JoinError; use super::archive::{Archive, ArchiveAddr}; use super::data::{DataAddr, GetError, PutError}; +pub(crate) const FILE_UPLOAD_BATCH_SIZE: usize = 128; + /// Errors that can occur during the file upload operation. 
#[cfg(feature = "fs")]
 #[derive(Debug, thiserror::Error)]
@@ -29,6 +33,8 @@ pub enum UploadError {
     PutError(#[from] PutError),
     #[error("Failed to fetch file")]
     GetError(#[from] GetError),
+    #[error("Error in parallel processing")]
+    JoinError(#[from] JoinError),
     #[error("Failed to serialize")]
     Serialization(#[from] rmp_serde::encode::Error),
     #[error("Failed to deserialize")]
@@ -96,30 +102,51 @@ impl Client {
         dir_path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<ArchiveAddr, UploadError> {
-        let mut archive = Archive::new();
+        info!("Uploading directory: {dir_path:?}");
+        let start = tokio::time::Instant::now();
 
+        // start upload of files in parallel
+        let mut upload_tasks = Vec::new();
         for entry in walkdir::WalkDir::new(dir_path) {
             let entry = entry?;
-
             if !entry.file_type().is_file() {
                 continue;
             }
-            let path = entry.path().to_path_buf();
-            tracing::info!("Uploading file: {path:?}");
-            #[cfg(feature = "loud")]
-            println!("Uploading file: {path:?}");
-            let file = self.file_upload(path.clone(), wallet).await?;
-
             let metadata = metadata_from_entry(&entry);
+            let path = entry.path().to_path_buf();
+            upload_tasks.push(async move {
+                let file = self.file_upload(path.clone(), wallet).await;
+                (path, metadata, file)
+            });
+        }
 
-            archive.add_file(path, file, metadata);
+        // wait for all files to be uploaded
+        let uploads =
+            process_tasks_with_max_concurrency(upload_tasks, FILE_UPLOAD_BATCH_SIZE).await?;
+        info!(
+            "Upload of {} files completed in {:?}",
+            uploads.len(),
+            start.elapsed()
+        );
+        let mut archive = Archive::new();
+        for (path, metadata, maybe_file) in uploads.into_iter() {
+            match maybe_file {
+                Ok(file) => archive.add_file(path, file, metadata),
+                Err(err) => {
+                    error!("Failed to upload file: {path:?}: {err:?}");
+                    return Err(err);
+                }
+            }
         }
 
+        // upload archive
         let archive_serialized = archive.into_bytes()?;
-
         let arch_addr = self.data_put(archive_serialized, wallet).await?;
 
+        info!("Complete archive upload completed in {:?}", start.elapsed());
+        #[cfg(feature = "loud")]
+        println!("Upload completed in {:?}", start.elapsed());
         Ok(arch_addr)
     }
 
@@ -130,6 +157,10 @@ impl Client {
         path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<DataAddr, UploadError> {
+        info!("Uploading file: {path:?}");
+        #[cfg(feature = "loud")]
+        println!("Uploading file: {path:?}");
+
         let data = tokio::fs::read(path).await?;
         let data = Bytes::from(data);
         let addr = self.data_put(data, wallet).await?;
diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs
index 0d9b819d70..5202f732fb 100644
--- a/autonomi/src/client/fs_private.rs
+++ b/autonomi/src/client/fs_private.rs
@@ -14,6 +14,7 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
+use crate::client::utils::process_tasks_with_max_concurrency;
 use crate::client::Client;
 use bytes::Bytes;
 use sn_evm::EvmWallet;
@@ -23,6 +24,8 @@ use super::archive_private::{PrivateArchive, PrivateArchiveAccess};
 use super::data_private::PrivateDataAccess;
 use super::fs::{DownloadError, UploadError};
 
+use super::fs::FILE_UPLOAD_BATCH_SIZE;
+
 impl Client {
     /// Download a private file from network to local file system
     pub async fn private_file_download(
@@ -59,30 +62,54 @@ impl Client {
         dir_path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<PrivateArchiveAccess, UploadError> {
-        let mut archive = PrivateArchive::new();
+        info!("Uploading directory as private: {dir_path:?}");
+        let start = tokio::time::Instant::now();
 
+        // start upload of files in parallel
+        let mut upload_tasks = Vec::new();
         for entry in walkdir::WalkDir::new(dir_path) {
             let entry = entry?;
-
             if !entry.file_type().is_file() {
                 continue;
             }
-            let path = entry.path().to_path_buf();
-            tracing::info!("Uploading file: {path:?}");
-            #[cfg(feature = "loud")]
-            println!("Uploading file: {path:?}");
-            let file = self.private_file_upload(path.clone(), wallet).await?;
-
             let metadata = super::fs::metadata_from_entry(&entry);
+            let path = entry.path().to_path_buf();
+            upload_tasks.push(async move {
+                let file = self.private_file_upload(path.clone(), wallet).await;
+                (path, metadata, file)
+            });
+        }
 
-            archive.add_file(path, file, metadata);
+        // wait for all files to be uploaded
+        let uploads =
+            process_tasks_with_max_concurrency(upload_tasks, FILE_UPLOAD_BATCH_SIZE).await?;
+        info!(
+            "Upload of {} files completed in {:?}",
+            uploads.len(),
+            start.elapsed()
+        );
+        let mut archive = PrivateArchive::new();
+        for (path, metadata, maybe_file) in uploads.into_iter() {
+            match maybe_file {
+                Ok(file) => archive.add_file(path, file, metadata),
+                Err(err) => {
+                    error!("Failed to upload file: {path:?}: {err:?}");
+                    return Err(err);
+                }
+            }
         }
 
+        // upload archive
         let archive_serialized = archive.into_bytes()?;
-
         let arch_addr = self.private_data_put(archive_serialized, wallet).await?;
 
+        info!(
+            "Complete private archive upload completed in {:?}",
+            start.elapsed()
+        );
+        #[cfg(feature = "loud")]
+        println!("Upload completed in {:?}", start.elapsed());
         Ok(arch_addr)
     }
 
@@ -93,6 +120,10 @@ impl Client {
         path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<PrivateDataAccess, UploadError> {
+        info!("Uploading file: {path:?}");
+        #[cfg(feature = "loud")]
+        println!("Uploading file: {path:?}");
+
         let data = tokio::fs::read(path).await?;
         let data = Bytes::from(data);
         let addr = self.private_data_put(data, wallet).await?;
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index 68ae70f2f7..60a9f8c48c 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -6,9 +6,8 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
-use std::{collections::HashMap, num::NonZero};
-
 use bytes::Bytes;
+use futures::stream::{FuturesUnordered, StreamExt};
 use libp2p::kad::{Quorum, Record};
 use rand::{thread_rng, Rng};
 use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
@@ -21,6 +20,8 @@ use sn_protocol::{
     storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy},
     NetworkAddress,
 };
+use std::{collections::HashMap, future::Future, num::NonZero};
+use tokio::task::JoinError;
 use xor_name::XorName;
 
 use super::{
@@ -248,3 +249,33 @@ pub(crate) fn extract_quote_payments(
 
     (to_be_paid, already_paid)
 }
+
+/// Drive the given futures to completion, keeping at most `batch_size` of them
+/// in flight at any one time.
+pub(crate) async fn process_tasks_with_max_concurrency<I, R>(
+    tasks: I,
+    batch_size: usize,
+) -> Result<Vec<R>, JoinError>
+where
+    I: IntoIterator,
+    I::Item: Future<Output = R> + Send,
+    R: Send,
+{
+    let mut futures = FuturesUnordered::new();
+    let mut results = Vec::new();
+
+    for task in tasks.into_iter() {
+        futures.push(task);
+
+        // Once the in-flight set is full, drain one result before admitting the next task.
+        if futures.len() >= batch_size {
+            if let Some(result) = futures.next().await {
+                results.push(result);
+            }
+        }
+    }
+
+    // Process remaining tasks
+    while let Some(result) = futures.next().await {
+        results.push(result);
+    }
+
+    Ok(results)
+}

From d7dc71e5be0b81774f99c61a0a2e8f8be9c67dab Mon Sep 17 00:00:00 2001
From: grumbach
Date: Mon, 28 Oct 2024 18:01:30 +0900
Subject: [PATCH 117/128] feat: re-enable autonomi tests

---
 .github/workflows/merge.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index d639924585..8aedb0e6d2 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -110,6 +110,10 @@ jobs:
 
       - uses: Swatinem/rust-cache@v2
 
+      - name: Run autonomi tests
+        timeout-minutes: 25
+        run: cargo test --release --package autonomi --lib --features="full,fs"
+
       - name: Run node tests
         timeout-minutes: 25
         run: cargo test --release --package sn_node --lib

From 899661c82d8beaa97de43f02b27985eece20fb20 Mon Sep 17 00:00:00 2001
From: grumbach
Date: Mon, 28 Oct 2024 18:10:46 +0900
Subject: [PATCH 118/128] feat: batch size depends on CPU count

---
 autonomi/src/client/fs.rs         | 11 +++++++++--
 autonomi/src/client/fs_private.rs |  2 +-
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 8cb177a6d5..5d2ad53215 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -14,12 +14,19 @@ use bytes::Bytes;
 use sn_evm::EvmWallet;
 use sn_networking::target_arch::{Duration, SystemTime};
 use std::path::PathBuf;
+use std::sync::LazyLock;
 use tokio::task::JoinError;
 
 use super::archive::{Archive, ArchiveAddr};
 use super::data::{DataAddr, GetError, PutError};
 
-pub(crate) const FILE_UPLOAD_BATCH_SIZE: usize = 128;
+/// Number of files to upload in parallel.
+pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+    std::thread::available_parallelism()
+        .map(|n| n.get())
+        .unwrap_or(1)
+        * 16
+});
 
 /// Errors that can occur during the file upload operation.
#[cfg(feature = "fs")] @@ -123,7 +130,7 @@ impl Client { // wait for all files to be uploaded let uploads = - process_tasks_with_max_concurrency(upload_tasks, FILE_UPLOAD_BATCH_SIZE).await?; + process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await?; info!( "Upload of {} files completed in {:?}", uploads.len(), diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs index 5202f732fb..b6ed2672f9 100644 --- a/autonomi/src/client/fs_private.rs +++ b/autonomi/src/client/fs_private.rs @@ -83,7 +83,7 @@ impl Client { // wait for all files to be uploaded let uploads = - process_tasks_with_max_concurrency(upload_tasks, FILE_UPLOAD_BATCH_SIZE).await?; + process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await?; info!( "Upload of {} files completed in {:?}", uploads.len(), From ade585f0cfebb707aea3b88d12976032b1883fcf Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 29 Oct 2024 13:16:26 +0900 Subject: [PATCH 119/128] feat: limited threads chunk uploads --- autonomi/src/client/data.rs | 50 ++++++++++++++++++----------- autonomi/src/client/data_private.rs | 30 ++++++++++------- autonomi/src/client/fs.rs | 2 +- 3 files changed, 52 insertions(+), 30 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 869022cd37..bababdda7d 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -8,11 +8,13 @@ use bytes::Bytes; use libp2p::kad::Quorum; -use tokio::task::{JoinError, JoinSet}; +use tokio::task::JoinError; use std::collections::HashSet; +use std::sync::LazyLock; use xor_name::XorName; +use crate::client::utils::process_tasks_with_max_concurrency; use crate::client::{ClientEvent, UploadSummary}; use crate::{self_encryption::encrypt, Client}; use sn_evm::{Amount, AttoTokens}; @@ -23,6 +25,14 @@ use sn_protocol::{ NetworkAddress, }; +/// Number of chunks to upload in parallel. 
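+/// (Heuristic mirroring `FILE_UPLOAD_BATCH_SIZE` above: 8 concurrent chunk uploads per
+/// available CPU core, falling back to a single core when parallelism cannot be detected.)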
+pub static CHUNK_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
+    std::thread::available_parallelism()
+        .map(|n| n.get())
+        .unwrap_or(1)
+        * 8
+});
+
 /// Raw Data Address (points to a DataMap)
 pub type DataAddr = XorName;
 /// Raw Chunk Address (points to a [`Chunk`])
@@ -110,12 +120,9 @@ impl Client {
     pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result<DataAddr, PutError> {
         let now = sn_networking::target_arch::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
-        info!(
-            "Uploading datamap chunk to the network at: {:?}",
-            data_map_chunk.address()
-        );
-
+        let data_map_addr = data_map_chunk.address();
         debug!("Encryption took: {:.2?}", now.elapsed());
+        info!("Uploading datamap chunk to the network at: {data_map_addr:?}");
 
         let map_xor_name = *data_map_chunk.address().xorname();
         let mut xor_names = vec![map_xor_name];
@@ -131,17 +138,15 @@ impl Client {
             .await
             .inspect_err(|err| error!("Error paying for data: {err:?}"))?;
 
-        let mut record_count = 0;
-
         // Upload all the chunks in parallel including the data map chunk
         debug!("Uploading {} chunks", chunks.len());
-        let mut tasks = JoinSet::new();
+        let mut upload_tasks = vec![];
         for chunk in chunks.into_iter().chain(std::iter::once(data_map_chunk)) {
             let self_clone = self.clone();
            let address = *chunk.address();
             if let Some(proof) = payment_proofs.get(chunk.name()) {
                 let proof_clone = proof.clone();
-                tasks.spawn(async move {
+                upload_tasks.push(async move {
                     self_clone
                         .chunk_upload_with_payment(chunk, proof_clone)
                         .await
@@ -151,14 +156,23 @@ impl Client {
                 debug!("Chunk at {address:?} was already paid for so skipping");
             }
         }
-        while let Some(result) = tasks.join_next().await {
-            result
-                .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
-                .map_err(PutError::JoinError)?
-                .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
-            record_count += 1;
-        }
-
+        let uploads = process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE)
+            .await
+            .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
+            .map_err(PutError::JoinError)?;
+
+        // Check for errors
+        let total_uploads = uploads.len();
+        let ok_uploads = uploads
+            .iter()
+            .filter_map(|up| up.is_ok().then_some(()))
+            .count();
+        info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads);
+        let uploads: Result<Vec<_>, _> = uploads.into_iter().collect();
+        uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
+        let record_count = ok_uploads;
+
+        // Reporting
         if let Some(channel) = self.client_event_sender.as_ref() {
             let tokens_spent = payment_proofs
                 .values()
diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs
index b6d0bfa8a3..d464f4db4e 100644
--- a/autonomi/src/client/data_private.rs
+++ b/autonomi/src/client/data_private.rs
@@ -12,9 +12,10 @@ use bytes::Bytes;
 use serde::{Deserialize, Serialize};
 use sn_evm::{Amount, EvmWallet};
 use sn_protocol::storage::Chunk;
-use tokio::task::JoinSet;
 
+use super::data::CHUNK_UPLOAD_BATCH_SIZE;
 use super::data::{GetError, PutError};
+use crate::client::utils::process_tasks_with_max_concurrency;
 use crate::client::{ClientEvent, UploadSummary};
 use crate::{self_encryption::encrypt, Client};
 
@@ -78,15 +79,14 @@ impl Client {
             .inspect_err(|err| error!("Error paying for data: {err:?}"))?;
 
         // Upload the chunks with the payments
-        let mut record_count = 0;
         debug!("Uploading {} chunks", chunks.len());
-        let mut tasks = JoinSet::new();
+        let mut upload_tasks = vec![];
         for chunk in chunks {
             let self_clone = self.clone();
             let address = *chunk.address();
             if let Some(proof) = payment_proofs.get(chunk.name()) {
                 let proof_clone = proof.clone();
-                tasks.spawn(async move {
+                upload_tasks.push(async move {
                     self_clone
                         .chunk_upload_with_payment(chunk, proof_clone)
                         .await
@@ -96,13 +96,21 @@ impl Client {
                 debug!("Chunk at {address:?} was already paid for so skipping");
             }
         }
-        while let Some(result) = tasks.join_next().await {
-            result
-                .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
-                .map_err(PutError::JoinError)?
-                .inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
-            record_count += 1;
-        }
+        let uploads = process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE)
+            .await
+            .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
+            .map_err(PutError::JoinError)?;
+
+        // Check for errors
+        let total_uploads = uploads.len();
+        let ok_uploads = uploads
+            .iter()
+            .filter_map(|up| up.is_ok().then_some(()))
+            .count();
+        info!("Uploaded {} chunks out of {}", ok_uploads, total_uploads);
+        let uploads: Result<Vec<_>, _> = uploads.into_iter().collect();
+        uploads.inspect_err(|err| error!("Error uploading chunk: {err:?}"))?;
+        let record_count = ok_uploads;
 
         // Reporting
         if let Some(channel) = self.client_event_sender.as_ref() {
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index 5d2ad53215..c61aea0eb3 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -25,7 +25,7 @@ pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
     std::thread::available_parallelism()
         .map(|n| n.get())
         .unwrap_or(1)
-        * 16
+        * 8
 });
 
 /// Errors that can occur during the file upload operation.

From 25e48e15372b2732e85027fd68b3bbed39ac59c6 Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 29 Oct 2024 13:22:01 +0900
Subject: [PATCH 120/128] feat: customizable upload batch size from env var,
 defaults to 8 per cpu count

---
 autonomi/src/client/data.rs | 16 ++++++++++++----
 autonomi/src/client/fs.rs   | 16 ++++++++++++----
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index bababdda7d..df1c10309e 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -26,11 +26,19 @@ use sn_protocol::{
 };
 
 /// Number of chunks to upload in parallel.
+/// Can be overridden by the `CHUNK_UPLOAD_BATCH_SIZE` environment variable.
 pub static CHUNK_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
-    std::thread::available_parallelism()
-        .map(|n| n.get())
-        .unwrap_or(1)
-        * 8
+    let batch_size = std::env::var("CHUNK_UPLOAD_BATCH_SIZE")
+        .ok()
+        .and_then(|s| s.parse().ok())
+        .unwrap_or(
+            std::thread::available_parallelism()
+                .map(|n| n.get())
+                .unwrap_or(1)
+                * 8,
+        );
+    info!("Chunk upload batch size: {}", batch_size);
+    batch_size
 });
 
 /// Raw Data Address (points to a DataMap)
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index c61aea0eb3..c42c2d10bc 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -21,11 +21,19 @@ use super::archive::{Archive, ArchiveAddr};
 use super::data::{DataAddr, GetError, PutError};
 
 /// Number of files to upload in parallel.
+/// Can be overridden by the `FILE_UPLOAD_BATCH_SIZE` environment variable.
 pub static FILE_UPLOAD_BATCH_SIZE: LazyLock<usize> = LazyLock::new(|| {
-    std::thread::available_parallelism()
-        .map(|n| n.get())
-        .unwrap_or(1)
-        * 8
+    let batch_size = std::env::var("FILE_UPLOAD_BATCH_SIZE")
+        .ok()
+        .and_then(|s| s.parse().ok())
+        .unwrap_or(
+            std::thread::available_parallelism()
+                .map(|n| n.get())
+                .unwrap_or(1)
+                * 8,
+        );
+    info!("File upload batch size: {}", batch_size);
+    batch_size
 });
 
 /// Errors that can occur during the file upload operation.

From d273e2f7a147e253248908c80364eed2e3113eac Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 29 Oct 2024 14:46:15 +0900
Subject: [PATCH 121/128] feat: add wallet lock to prevent concurrent payments

---
 autonomi/src/client/utils.rs |  9 +++++++++
 evmlib/src/wallet.rs         | 14 +++++++++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index 60a9f8c48c..be26e35fd2 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -159,6 +159,11 @@ impl Client {
 
         let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
 
+        // Make sure nobody else can use the wallet while we are paying
+        debug!("Waiting for wallet lock");
+        let lock_guard = wallet.lock().await;
+        debug!("Locked wallet");
+
         // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
         // TODO: retry when it fails?
         // Execute chunk payments
@@ -167,6 +172,10 @@ impl Client {
             .await
             .map_err(|err| PayError::from(err.0))?;
 
+        // payment is done, unlock the wallet for other threads
+        drop(lock_guard);
+        debug!("Unlocked wallet");
+
         let proofs = payment_proof_from_quotes_and_payments(&cost_map, &payments);
 
         trace!(
diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs
index b9504f69a1..b6719be336 100644
--- a/evmlib/src/wallet.rs
+++ b/evmlib/src/wallet.rs
@@ -22,6 +22,7 @@ use alloy::signers::local::{LocalSigner, PrivateKeySigner};
 use alloy::transports::http::{reqwest, Client, Http};
 use alloy::transports::{RpcError, TransportErrorKind};
 use std::collections::BTreeMap;
+use std::sync::Arc;
 
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
@@ -39,12 +40,17 @@ pub enum Error {
 pub struct Wallet {
     wallet: EthereumWallet,
     network: Network,
+    lock: Arc<tokio::sync::Mutex<()>>,
 }
 
 impl Wallet {
     /// Creates a new Wallet object with the specific Network and EthereumWallet.
     pub fn new(network: Network, wallet: EthereumWallet) -> Self {
-        Self { wallet, network }
+        Self {
+            wallet,
+            network,
+            lock: Arc::new(tokio::sync::Mutex::new(())),
+        }
     }
 
     /// Convenience function that creates a new Wallet with a random EthereumWallet.
@@ -136,6 +142,12 @@ impl Wallet {
     pub fn to_provider(&self) -> ProviderWithWallet {
         http_provider_with_wallet(self.network.rpc_url().clone(), self.wallet.clone())
     }
+
+    /// Lock the wallet to prevent concurrent use.
+    /// Drop the guard to unlock the wallet.
+    pub async fn lock(&self) -> tokio::sync::MutexGuard<()> {
+        self.lock.lock().await
+    }
 }
 
 /// Generate an EthereumWallet with a random private key.
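Editor's note: the commit above serialises payments through a tokio mutex that guards no data at all; because the mutex sits behind an Arc, every clone of the wallet shares the same lock, so two tasks paying from the same wallet cannot interleave their transactions. Below is a minimal, self-contained sketch of that pattern (assuming only the tokio crate with the "macros" and "rt-multi-thread" features; the Wallet type here is a hypothetical stand-in, not the evmlib one):

use std::sync::Arc;

#[derive(Clone)]
struct Wallet {
    lock: Arc<tokio::sync::Mutex<()>>,
}

impl Wallet {
    fn new() -> Self {
        Self {
            lock: Arc::new(tokio::sync::Mutex::new(())),
        }
    }

    async fn pay(&self, id: u32) {
        // The guard is held until it goes out of scope at the end of the function.
        let _guard = self.lock.lock().await;
        println!("wallet busy paying for task {id}");
        // ... submit the transaction and await the receipt here ...
    } // _guard dropped here: the next caller may proceed
}

#[tokio::main]
async fn main() {
    let wallet = Wallet::new();
    // Both payments are spawned concurrently, but the shared mutex
    // forces them to run one after the other.
    let (a, b) = (wallet.clone(), wallet.clone());
    let t1 = tokio::spawn(async move { a.pay(1).await });
    let t2 = tokio::spawn(async move { b.pay(2).await });
    let _ = tokio::join!(t1, t2);
}

Dropping the guard is what releases the lock, which is why the patch calls drop(lock_guard) as soon as the payment step finishes instead of holding it while proofs are assembled.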
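Editor's note: process_tasks_with_max_concurrency (introduced earlier in this series and reworked in the next commit to return a plain Vec) implements a sliding window over a FuturesUnordered: futures are pushed until batch_size of them are in flight, then one completion is awaited before the next is admitted. A runnable sketch of the same pattern (assuming the futures and tokio crates; run_bounded and the env-var fallback mirror the patches but are illustrative, not the crate's API):

use futures::stream::{FuturesUnordered, StreamExt};

async fn run_bounded<I, R>(tasks: I, batch_size: usize) -> Vec<R>
where
    I: IntoIterator,
    I::Item: std::future::Future<Output = R>,
{
    let mut in_flight = FuturesUnordered::new();
    let mut results = Vec::new();
    for task in tasks {
        in_flight.push(task);
        // Once the window is full, wait for one task to finish before admitting more.
        if in_flight.len() >= batch_size {
            if let Some(result) = in_flight.next().await {
                results.push(result);
            }
        }
    }
    // Drain whatever is still in flight.
    while let Some(result) = in_flight.next().await {
        results.push(result);
    }
    results
}

#[tokio::main]
async fn main() {
    // Mirrors the env-var override added in PATCH 120 (illustrative only).
    let batch_size = std::env::var("CHUNK_UPLOAD_BATCH_SIZE")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or_else(|| {
            std::thread::available_parallelism()
                .map(|n| n.get())
                .unwrap_or(1)
                * 8
        });
    let results = run_bounded((0..20).map(|i| async move { i * 2 }), batch_size).await;
    assert_eq!(results.len(), 20);
}

Note that results arrive in completion order rather than submission order, which is fine here because each upload result is checked independently afterwards.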

From 436c24a6f3b6654e8fe3111f011e237d03741c08 Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 29 Oct 2024 15:31:36 +0900
Subject: [PATCH 122/128] fix: remove useless join error

---
 autonomi/src/client/data.rs         | 6 ++----
 autonomi/src/client/data_private.rs | 6 ++----
 autonomi/src/client/fs.rs           | 5 +----
 autonomi/src/client/fs_private.rs   | 2 +-
 autonomi/src/client/utils.rs        | 8 ++------
 5 files changed, 8 insertions(+), 19 deletions(-)

diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 06975919f5..4902f33c5e 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -162,10 +162,8 @@ impl Client {
                 debug!("Chunk at {address:?} was already paid for so skipping");
             }
         }
-        let uploads = process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE)
-            .await
-            .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
-            .map_err(PutError::JoinError)?;
+        let uploads =
+            process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await;
 
         // Check for errors
         let total_uploads = uploads.len();
diff --git a/autonomi/src/client/data_private.rs b/autonomi/src/client/data_private.rs
index d464f4db4e..d2ecaf0a2b 100644
--- a/autonomi/src/client/data_private.rs
+++ b/autonomi/src/client/data_private.rs
@@ -96,10 +96,8 @@ impl Client {
                 debug!("Chunk at {address:?} was already paid for so skipping");
             }
         }
-        let uploads = process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE)
-            .await
-            .inspect_err(|err| error!("Join error uploading chunk: {err:?}"))
-            .map_err(PutError::JoinError)?;
+        let uploads =
+            process_tasks_with_max_concurrency(upload_tasks, *CHUNK_UPLOAD_BATCH_SIZE).await;
 
         // Check for errors
         let total_uploads = uploads.len();
diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index c42c2d10bc..c1505224bc 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -15,7 +15,6 @@ use sn_evm::EvmWallet;
 use sn_networking::target_arch::{Duration, SystemTime};
 use std::path::PathBuf;
 use std::sync::LazyLock;
-use tokio::task::JoinError;
 
 use super::archive::{Archive, ArchiveAddr};
 use super::data::{DataAddr, GetError, PutError};
@@ -48,8 +47,6 @@ pub enum UploadError {
     PutError(#[from] PutError),
     #[error("Failed to fetch file")]
     GetError(#[from] GetError),
-    #[error("Error in parralel processing")]
-    JoinError(#[from] JoinError),
     #[error("Failed to serialize")]
     Serialization(#[from] rmp_serde::encode::Error),
     #[error("Failed to deserialize")]
@@ -138,7 +135,7 @@ impl Client {
 
         // wait for all files to be uploaded
         let uploads =
-            process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await?;
+            process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await;
         info!(
             "Upload of {} files completed in {:?}",
             uploads.len(),
diff --git a/autonomi/src/client/fs_private.rs b/autonomi/src/client/fs_private.rs
index b6ed2672f9..08d453ae37 100644
--- a/autonomi/src/client/fs_private.rs
+++ b/autonomi/src/client/fs_private.rs
@@ -83,7 +83,7 @@ impl Client {
 
         // wait for all files to be uploaded
         let uploads =
-            process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await?;
+            process_tasks_with_max_concurrency(upload_tasks, *FILE_UPLOAD_BATCH_SIZE).await;
         info!(
             "Upload of {} files completed in {:?}",
             uploads.len(),
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
index be26e35fd2..95d70b6e4d 100644
--- a/autonomi/src/client/utils.rs
+++ b/autonomi/src/client/utils.rs
@@ -21,7 +21,6 @@ use sn_protocol::{
     NetworkAddress,
 };
 use std::{collections::HashMap, future::Future, num::NonZero};
-use tokio::task::JoinError;
 use xor_name::XorName;
 
 use super::{
@@ -259,10 +258,7 @@ pub(crate) fn extract_quote_payments(
 
     (to_be_paid, already_paid)
 }
 
-pub(crate) async fn process_tasks_with_max_concurrency<I, R>(
-    tasks: I,
-    batch_size: usize,
-) -> Result<Vec<R>, JoinError>
+pub(crate) async fn process_tasks_with_max_concurrency<I, R>(tasks: I, batch_size: usize) -> Vec<R>
 where
     I: IntoIterator,
     I::Item: Future<Output = R> + Send,
@@ -286,5 +282,5 @@ where
         results.push(result);
     }
 
-    Ok(results)
+    results
 }

From 989761b67d466160b983337b4915ee6e35aff298 Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 29 Oct 2024 15:43:22 +0900
Subject: [PATCH 123/128] chore: remove sneaky useless error

---
 autonomi/src/client/data.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 4902f33c5e..0a6be8598a 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -8,7 +8,6 @@
 
 use bytes::Bytes;
 use libp2p::kad::Quorum;
-use tokio::task::JoinError;
 
 use std::collections::HashSet;
 use std::sync::LazyLock;
@@ -59,8 +58,6 @@ pub enum PutError {
     PayError(#[from] PayError),
     #[error("Serialization error: {0}")]
     Serialization(String),
-    #[error("Join error uploading chunk.")]
-    JoinError(#[from] JoinError),
     #[error("A wallet error occurred.")]
     Wallet(#[from] sn_evm::EvmError),
     #[error("The vault owner key does not match the client's public key")]

From bc68e90f013ef4649441a00caee266fb87ed3a56 Mon Sep 17 00:00:00 2001
From: Ermine Jose
Date: Wed, 16 Oct 2024 16:23:13 +0530
Subject: [PATCH 124/128] feat(cli): add wallet support

---
 Cargo.lock                          |  26 ++++++
 autonomi-cli/Cargo.toml             |   2 +
 autonomi-cli/src/access/data_dir.rs |   2 +-
 autonomi-cli/src/commands.rs        |  36 +++++++
 autonomi-cli/src/commands/wallet.rs | 100 ++++++++++++++++++
 autonomi/src/lib.rs                 |   1 +
 autonomi/src/wallet.rs              | 139 ++++++++++++++++++++++++
 evmlib/src/wallet.rs                |   5 +
 sn_evm/Cargo.toml                   |   4 +
 sn_evm/src/encryption.rs            | 134 +++++++++++++++++++++++
 sn_evm/src/error.rs                 |   9 ++
 sn_evm/src/lib.rs                   |   2 +
 sn_evm/src/wallet.rs                | 103 +++++++++++++++++++
 13 files changed, 562 insertions(+), 1 deletion(-)
 create mode 100644 autonomi-cli/src/commands/wallet.rs
 create mode 100644 autonomi/src/wallet.rs
 create mode 100644 sn_evm/src/encryption.rs
 create mode 100644 sn_evm/src/wallet.rs

diff --git a/Cargo.lock b/Cargo.lock
index 1784d62f30..e04a254cbf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1134,12 +1134,14 @@ dependencies = [
  "indicatif",
  "rand 0.8.5",
  "rayon",
+ "rpassword",
  "serde",
  "serde_json",
  "sn_build_info",
  "sn_logging",
  "sn_peers_acquisition",
  "tempfile",
+ "thiserror",
  "tokio",
  "tracing",
  "walkdir",
@@ -7366,6 +7368,17 @@ dependencies = [
  "serde_derive",
 ]
 
+[[package]]
+name = "rpassword"
+version = "7.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f"
+dependencies = [
+ "libc",
+ "rtoolbox",
+ "windows-sys 0.48.0",
+]
+
 [[package]]
 name = "rtnetlink"
 version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "futures",
  "log",
  "netlink-packet-route",
  "netlink-proto",
  "nix",
  "thiserror",
  "tokio",
 ]
 
+[[package]]
+name = "rtoolbox"
+version = "0.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e"
+dependencies = [
+ "libc",
+ "windows-sys 0.48.0",
+]
+
 [[package]]
 name = "ruint"
 version = "1.12.3"
@@ -8199,7 +8222,9 @@ dependencies = [
 name = "sn_evm"
 version = "0.1.2-rc.2"
 dependencies = [
+ "color-eyre",
"custom_debug", + "dirs-next", "evmlib", "hex 0.4.3", "lazy_static", @@ -8207,6 +8232,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rmp-serde", + "rpassword", "serde", "serde_json", "tempfile", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 0ccab10317..25be46525c 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -34,7 +34,9 @@ autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" dirs-next = "~2.0.0" +thiserror = "1.0" indicatif = { version = "0.17.5", features = ["tokio"] } +rpassword = "7.0" tokio = { version = "1.32.0", features = [ "io-util", "macros", diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs index 9233507264..c934ee7b16 100644 --- a/autonomi-cli/src/access/data_dir.rs +++ b/autonomi-cli/src/access/data_dir.rs @@ -25,4 +25,4 @@ pub fn get_client_data_dir_path() -> Result { ) })?; Ok(home_dirs) -} +} \ No newline at end of file diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index c374eca78f..6bce261b14 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -9,6 +9,7 @@ mod file; mod register; mod vault; +mod wallet; use clap::Subcommand; use color_eyre::Result; @@ -34,6 +35,12 @@ pub enum SubCmd { #[command(subcommand)] command: VaultCmd, }, + + /// Operations related to wallet management. + Wallet { + #[command(subcommand)] + command: WalletCmd, + } } #[derive(Subcommand, Debug)] @@ -145,6 +152,24 @@ pub enum VaultCmd { }, } +#[derive(Subcommand, Debug)] +pub enum WalletCmd { + /// Create a wallet + // #[command(subcommand)] + Create { + #[arg(long)] + encrypt: Option, + #[arg(long)] + password: Option, + #[arg(long)] + private_key: Option, + + }, + + /// Check the balance of the wallet + Balance, +} + pub async fn handle_subcommand(opt: Opt) -> Result<()> { let peers = crate::access::network::get_peers(opt.peers); let cmd = opt.command; @@ -180,5 +205,16 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { VaultCmd::Load => vault::load(peers.await?).await, VaultCmd::Sync { force } => vault::sync(peers.await?, force).await, }, + SubCmd::Wallet { command } => match command { + WalletCmd::Create { + encrypt, + password, + private_key, + + } => { + wallet::create( encrypt, password,private_key) + }, + WalletCmd::Balance => Ok(wallet::balance()?), + } } } diff --git a/autonomi-cli/src/commands/wallet.rs b/autonomi-cli/src/commands/wallet.rs new file mode 100644 index 0000000000..8dcfb25802 --- /dev/null +++ b/autonomi-cli/src/commands/wallet.rs @@ -0,0 +1,100 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+
+use autonomi::wallet::*;
+use color_eyre::Result;
+use rpassword::read_password;
+
+pub fn process_password(encryption: Option<String>, password: Option<String>) -> Option<String> {
+    match encryption {
+        Some(value) => {
+            if !(value == "Y" || value == "y" || value == "Yes" || value == "YES" || value == "yes") {
+                println!("value: {}", value);
+                return None;
+            }
+            match password {
+                Some(passw) => Some(passw),
+                None => {
+                    // prompt for the password
+                    println!("Please enter the password");
+                    let input_password = read_password();
+                    let pwd = match input_password {
+                        Ok(pwd) => pwd,
+                        Err(e) => {
+                            eprintln!("Failed to read password: {}", e);
+                            println!("Try again...");
+                            panic!("issue with password");
+                        }
+                    };
+                    Some(pwd)
+                }
+            }
+        }
+        _ => None,
+    }
+}
+
+pub fn create(
+    encryption: Option<String>,
+    password: Option<String>,
+    private_key: Option<String>,
+) -> Result<()> {
+    let pass = process_password(encryption, password);
+
+    match private_key {
+        Some(priv_key) => import_new_wallet(priv_key, pass),
+        None => create_new_wallet(pass),
+    }
+}
+
+pub fn import_new_wallet(private_key: String, encryption: Option<String>) -> Result<()> {
+    let mut file_path = import_evm_wallet(private_key);
+
+    if let Some(passw) = encryption {
+        file_path = encrypt_evm_wallet(file_path?, passw);
+    }
+
+    println!("The wallet is imported here: {}", file_path?);
+    Ok(())
+}
+
+pub fn create_new_wallet(encryption: Option<String>) -> Result<()> {
+    let mut file_path = create_evm_wallet();
+
+    if let Some(passw) = encryption {
+        file_path = encrypt_evm_wallet(file_path?, passw);
+    }
+
+    println!("The wallet is created here: {}", file_path?);
+    Ok(())
+}
+
+pub fn balance() -> Result<()> {
+    // List the available wallets as a numbered HashMap
+    let get_client_data_dir_path = get_wallet_directory();
+
+    let files = get_numbered_files(get_client_data_dir_path)?;
+
+    let mut sorted_files: Vec<(&u32, &(String, String))> = files.iter().collect();
+    sorted_files.sort_by_key(|&(key, _)| key);
+    // Print the numbered wallets
+    for (key, value) in sorted_files {
+        println!("{}: - {} - {}", key, value.0, value.1);
+    }
+
+    let key = prompt_for_key()?;
+
+    if let Ok(private_key) = get_private_key_from_wallet(key, files) {
+        get_wallet_information(private_key);
+    }
+    Ok(())
+}
\ No newline at end of file
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index c73bef1378..0d8e18a318 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -33,6 +33,7 @@
 extern crate tracing;
 
 pub mod client;
+pub mod wallet;
 #[cfg(feature = "data")]
 mod self_encryption;
 mod utils;
diff --git a/autonomi/src/wallet.rs b/autonomi/src/wallet.rs
new file mode 100644
index 0000000000..302a1f5744
--- /dev/null
+++ b/autonomi/src/wallet.rs
@@ -0,0 +1,139 @@
+use std::collections::HashMap;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+
+use sn_evm::wallet::{
+    create_a_evm_wallet, create_file_with_keys, get_client_wallet_dir_path,
+    get_gas_token_details, get_random_private_key, prompt_the_user_for_password,
+    wallet_encryption_status, wallet_encryption_storage, ENCRYPTED_MAIN_SECRET_KEY_FILENAME,
+};
+
+use sn_evm::encryption::{decrypt_secret_key, encrypt_secret_key};
+use sn_evm::EvmError;
+
+pub fn import_evm_wallet(wallet_private_key: String) -> Result<String, EvmError> {
+    let wallet_public_key = create_a_evm_wallet(&wallet_private_key)?;
+
+    let file_path = create_file_with_keys(wallet_private_key, wallet_public_key)?;
+    Ok(file_path)
+}
+
+pub fn get_wallet_information(private_key: String) {
+    let _ = get_gas_token_details(&private_key);
+}
+
+pub fn create_evm_wallet() -> Result<String, EvmError> {
+    let wallet_private_key = get_random_private_key();
+    let wallet_public_key = create_a_evm_wallet(&wallet_private_key)?;
+    let file_path = create_file_with_keys(wallet_private_key, wallet_public_key)?;
+    Ok(file_path)
+}
+
+pub fn encrypt_evm_wallet(file_path: String, password: String) -> Result<String, EvmError> {
+    if wallet_encryption_status(Path::new(&file_path)) {
+        // TODO: replace with a proper error condition later.
+        return Ok(String::from("Provided file is already encrypted"));
+    }
+
+    let private_key = std::fs::read_to_string(&file_path)
+        .map_err(|_| EvmError::OperationError("Not able to get the file dir path".to_string()))?;
+
+    let encrypted_private_key = encrypt_secret_key(&private_key, &password)?;
+
+    // Turn the wallet file into a directory holding the encrypted key.
+    if Path::new(&file_path).is_file() {
+        std::fs::remove_file(&file_path).expect("not able to remove the file");
+        std::fs::create_dir(&file_path).expect("not able to create the directory");
+    }
+
+    wallet_encryption_storage(&file_path, &encrypted_private_key)
+}
+
+pub fn get_private_key_from_wallet(
+    key: u32,
+    files: HashMap<u32, (String, String)>,
+) -> Result<String, EvmError> {
+    match files.get(&key) {
+        Some(value) => {
+            let mut wallet_directory = get_wallet_directory();
+            wallet_directory.push(value.1.clone());
+            if value.0 == "unprotected" {
+                let file_contents = std::fs::read(&wallet_directory);
+                if let Ok(file_data) = file_contents {
+                    let private_key = String::from_utf8(file_data).map_err(|_| {
+                        EvmError::OperationError("Not able to convert file contents".to_string())
+                    })?;
+                    return Ok(private_key);
+                }
+            }
+
+            if value.0 == "passw-protected" {
+                wallet_directory.push(ENCRYPTED_MAIN_SECRET_KEY_FILENAME);
+                println!("encrypted wallet path: {:?}", wallet_directory);
+                let encrypted_bytes = std::fs::read(wallet_directory);
+                if let Ok(file_data) = encrypted_bytes {
+                    let encrypted_private_key = String::from_utf8(file_data).map_err(|_| {
+                        EvmError::OperationError("Not able to convert file contents".to_string())
+                    })?;
+                    let password = prompt_the_user_for_password();
+                    match password {
+                        Some(value) => {
+                            println!("Received the password, initiating decryption!");
+                            let private_key = decrypt_secret_key(&encrypted_private_key, &value)?;
+                            return Ok(private_key);
+                        }
+                        None => {
+                            println!("The provided password is not proper, cannot proceed further");
+                        }
+                    }
+                }
+            }
+        }
+        None => {
+            println!("Provided key doesn't exist, try again");
+        }
+    }
+    Err(EvmError::OperationError(
+        "Not able to get the private key".to_string(),
+    ))
+}
+
+pub fn get_numbered_files(dir: PathBuf) -> std::io::Result<HashMap<u32, (String, String)>> {
+    // Create a new HashMap to store the files
+    let mut file_map: HashMap<u32, (String, String)> = std::collections::HashMap::new();
+    // Get an iterator over directory entries
+    let entries = std::fs::read_dir(dir)?;
+
+    // Iterate over the entries and insert them into the HashMap
+    for (index, entry) in entries.enumerate() {
+        let entry = entry?; // Unwrap the entry from Result
+        let mut path = entry.path(); // Get the path of the entry
+
+        if let Some(name) = path.file_name() {
+            let file_name = name.to_string_lossy().into_owned(); // Convert to String
+            let mut wallet_details = None;
+            if path.is_file() {
+                wallet_details = Some((String::from("unprotected"), file_name));
+            } else if path.is_dir() {
+                path.push(ENCRYPTED_MAIN_SECRET_KEY_FILENAME);
+                if path.is_file() {
+                    wallet_details = Some((String::from("passw-protected"), file_name));
+                }
+            }
+            // Insert into the HashMap with the number as key
+            if let Some(wallet_value) = wallet_details {
+                file_map.insert((index + 1) as u32, wallet_value);
+            }
+        }
+    }
+    Ok(file_map)
+}
+
+// Function to prompt the user for a key
+pub fn prompt_for_key() -> Result<u32, EvmError> {
+    print!("Enter a key to retrieve the file: ");
+    std::io::stdout()
+        .flush()
+        .map_err(|_| EvmError::OperationError("Not able to flush out stdio".to_string()))?;
+    let mut input = String::new();
+    std::io::stdin()
+        .read_line(&mut input)
+        .map_err(|_| EvmError::OperationError("Not able to read the line contents".to_string()))?;
+
+    let input_value: u32 = input
+        .trim()
+        .parse()
+        .map_err(|_| EvmError::OperationError("Not able to parse the line contents".to_string()))?;
+    Ok(input_value)
+}
+
+pub fn get_wallet_directory() -> PathBuf {
+    get_client_wallet_dir_path().expect("error")
+}
\ No newline at end of file
diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs
index b6719be336..e11a066131 100644
--- a/evmlib/src/wallet.rs
+++ b/evmlib/src/wallet.rs
@@ -192,6 +192,11 @@ pub fn wallet_address(wallet: &EthereumWallet) -> Address {
     <EthereumWallet as NetworkWallet<Ethereum>>::default_signer_address(wallet)
 }
 
+pub fn get_random_private_key_for_wallet() -> String {
+    let signer: PrivateKeySigner = LocalSigner::random();
+    signer.to_bytes().to_string()
+}
+
 /// Returns the raw balance of payment tokens for this wallet.
 pub async fn balance_of_tokens(
     account: Address,
diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml
index 27859ae7df..df1c9d74ff 100644
--- a/sn_evm/Cargo.toml
+++ b/sn_evm/Cargo.toml
@@ -31,6 +31,10 @@ tracing = { version = "~0.1.26" }
 xor_name = "5.0.0"
 ring = "0.17.8"
 tempfile = "3.10.1"
+tokio = { version = "1", features = ["full"] }
+rpassword = "7.0"
+color-eyre = "~0.6"
+dirs-next = "~2.0.0"
 
 [dev-dependencies]
 tokio = { version = "1.32.0", features = ["macros", "rt"] }
diff --git a/sn_evm/src/encryption.rs b/sn_evm/src/encryption.rs
new file mode 100644
index 0000000000..134f13d93e
--- /dev/null
+++ b/sn_evm/src/encryption.rs
@@ -0,0 +1,134 @@
+use rand::Rng;
+use std::num::NonZeroU32;
+use ring::aead::{BoundKey, Nonce, NonceSequence};
+use ring::error::Unspecified;
+use crate::EvmError;
+
+struct NonceSeq([u8; 12]);
+
+impl NonceSequence for NonceSeq {
+    fn advance(&mut self) -> std::result::Result<Nonce, Unspecified> {
+        Nonce::try_assume_unique_for_key(&self.0)
+    }
+}
+
+/// Number of iterations for pbkdf2.
+const ITERATIONS: NonZeroU32 = match NonZeroU32::new(100_000) {
+    Some(v) => v,
+    None => panic!("`100_000` is not zero"),
+};
+
+const SALT_LENGTH: usize = 8;
+const NONCE_LENGTH: usize = 12;
+
+pub fn encrypt_secret_key(secret_key: &str, password: &str) -> Result<String, EvmError> {
+    // Generate a random salt
+    // Salt is used to ensure unique derived keys even for identical passwords
+    let mut salt = [0u8; SALT_LENGTH];
+    rand::thread_rng().fill(&mut salt);
+
+    // Generate a random nonce
+    // Nonce is used to ensure unique encryption outputs even for identical inputs
+    let mut nonce = [0u8; NONCE_LENGTH];
+    rand::thread_rng().fill(&mut nonce);
+
+    let mut key = [0; 32];
+
+    // Derive a key from the password using PBKDF2 with HMAC
+    // PBKDF2 is used for key derivation to mitigate brute-force attacks by making key derivation computationally expensive
+    // HMAC is used as the pseudorandom function for its security properties
+    ring::pbkdf2::derive(
+        ring::pbkdf2::PBKDF2_HMAC_SHA512,
+        ITERATIONS,
+        &salt,
+        password.as_bytes(),
+        &mut key,
+    );
+
+    // Create an unbound key using CHACHA20_POLY1305 algorithm
+    // CHACHA20_POLY1305 is a fast and secure AEAD (Authenticated Encryption with Associated Data) algorithm
+    let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+        .map_err(|_| EvmError::FailedToEncryptKey(String::from("Could not create unbound key.")))?;
+
+    // Create a sealing key with the unbound key and nonce
+    let mut sealing_key = ring::aead::SealingKey::new(unbound_key, NonceSeq(nonce));
+    let aad = ring::aead::Aad::from(&[]);
+
+    // Convert the secret key to bytes
+    let secret_key_bytes = String::from(secret_key).into_bytes();
+    let mut encrypted_secret_key = secret_key_bytes;
+
+    // seal_in_place_append_tag encrypts the data and appends an authentication tag to ensure data integrity
+    sealing_key
+        .seal_in_place_append_tag(aad, &mut encrypted_secret_key)
+        .map_err(|_| EvmError::FailedToEncryptKey(String::from("Could not seal sealing key.")))?;
+
+    let mut encrypted_data = Vec::new();
+    encrypted_data.extend_from_slice(&salt);
+    encrypted_data.extend_from_slice(&nonce);
+    encrypted_data.extend_from_slice(&encrypted_secret_key);
+
+    // Return the encrypted secret key along with salt and nonce encoded as hex strings
+    Ok(hex::encode(encrypted_data))
+}
+
+pub fn decrypt_secret_key(encrypted_data: &str, password: &str) -> Result<String, EvmError> {
+    let encrypted_data = hex::decode(encrypted_data)
+        .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not decode the hex data.")))?;
+    let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH]
+        .try_into()
+        .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not process the hashed data.")))?;
+    let nonce: [u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH]
+        .try_into()
+        .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not process the hashed data.")))?;
+    let encrypted_secretkey = &encrypted_data[SALT_LENGTH + NONCE_LENGTH..];
+
+    let mut key = [0; 32];
+
+    // Reconstruct the key from salt and password
+    ring::pbkdf2::derive(
+        ring::pbkdf2::PBKDF2_HMAC_SHA512,
+        ITERATIONS,
+        &salt,
+        password.as_bytes(),
+        &mut key,
+    );
+
+    // Create an unbound key from the previously reconstructed key
+    let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+        .map_err(|_| {
key.")) + })?; + + + // Create an opening key using the unbound key and original nonce + let mut opening_key = ring::aead::OpeningKey::new(unbound_key, NonceSeq(nonce)); + let aad = ring::aead::Aad::from(&[]); + + // Convert the hex encoded and encrypted secret key to bytes + // let mut encrypted_secret_key = hex::decode(encrypted_secretkey).map_err(|_| { + // EvmError::FailedToDecryptKey(String::from("Invalid encrypted secret key encoding.")) + // }).expect("error"); + + let mut encrypted_secret_key = encrypted_secretkey.to_vec(); + // Decrypt the encrypted secret key bytes + let decrypted_data = opening_key + .open_in_place(aad, &mut encrypted_secret_key) + .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not open encrypted key, please check the password")))?; + + let mut secret_key_bytes = [0u8; 66]; + secret_key_bytes.copy_from_slice(&decrypted_data[0..66]); + + // Create secret key from decrypted byte + + Ok(String::from_utf8(secret_key_bytes.to_vec()).expect("not able to convert private key")) + +} \ No newline at end of file diff --git a/sn_evm/src/error.rs b/sn_evm/src/error.rs index 386683b5aa..7086ed8180 100644 --- a/sn_evm/src/error.rs +++ b/sn_evm/src/error.rs @@ -30,4 +30,13 @@ pub enum EvmError { #[error("Invalid quote public key")] InvalidQuotePublicKey, + + #[error("Failed to encrypt secret key: {0}")] + FailedToEncryptKey(String), + + #[error("Failed to decrypt secret key: {0}")] + FailedToDecryptKey(String), + + #[error("Failed to process Operation: {0}")] + OperationError(String), } diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index 49956db39e..40b2b2a762 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -27,6 +27,8 @@ pub use evmlib::Network as EvmNetwork; mod amount; mod data_payments; mod error; +pub mod wallet; +pub mod encryption; pub use data_payments::{PaymentQuote, ProofOfPayment, QuotingMetrics}; diff --git a/sn_evm/src/wallet.rs b/sn_evm/src/wallet.rs new file mode 100644 index 0000000000..1e41da8054 --- /dev/null +++ b/sn_evm/src/wallet.rs @@ -0,0 +1,103 @@ +use evmlib::wallet::{get_random_private_key_for_wallet, Wallet}; +use evmlib::utils::get_evm_network_from_env; +use tokio::{runtime::Runtime, task}; +use std::fs::File; +use std::fs; +use std::io::Write; +use std::path::Path; +use rpassword::read_password; +use color_eyre::eyre::{eyre, Context, Result}; +use std::path::PathBuf; +use crate::EvmError; + +pub const ENCRYPTED_MAIN_SECRET_KEY_FILENAME: &str = "main_secret_key.encrypted"; + + + +pub fn get_random_private_key() -> String { + get_random_private_key_for_wallet() +} + +pub fn get_gas_token_details(private_key: &String) -> Result<(),EvmError>{ + + let network = get_evm_network_from_env() + .map_err(|_| EvmError::OperationError("Not able to create the Network".to_string()))?; + + + let wallet = Wallet::new_from_private_key(network, &private_key) + .map_err(|_| EvmError::OperationError("Not able to create the Wallet".to_string()))?; + + task::block_in_place(|| { + let rt = Runtime::new() + .map_err(|_| EvmError::OperationError("Not able to create tokio runtime for wallet operation".to_string())) + .expect("Not able to create the runtime"); + + rt.block_on(async { + match wallet.balance_of_gas_tokens().await { + Ok(balance) => println!("balance of gas tokens: {:?}", balance), + Err(e) => eprintln!("Error: {:?}", e), + } + match wallet.balance_of_tokens().await { + Ok(balance) => println!("balance of tokens: {:?}", balance), + Err(e) => eprintln!("Error: {:?}", e), + } + + }) + }); + Ok(()) +} + +pub fn 
+pub fn create_a_evm_wallet(private_key: &String) -> Result<String, EvmError> {
+    let network = get_evm_network_from_env()
+        .map_err(|_| EvmError::OperationError("Not able to create the Network".to_string()))?;
+    let wallet = Wallet::new_from_private_key(network, private_key)
+        .map_err(|_| EvmError::OperationError("Not able to get the wallet".to_string()))?;
+    Ok(hex::encode(wallet.address()))
+}
+
+pub fn create_file_with_keys(private_key: String, public_key: String) -> Result<String, EvmError> {
+    let mut file_dir_path = get_client_wallet_dir_path()
+        .map_err(|_| EvmError::OperationError("Not able to get the file dir path".to_string()))?;
+    file_dir_path.push(public_key);
+    let mut file = File::create(&file_dir_path)
+        .map_err(|_| EvmError::OperationError("Not able to create the wallet file".to_string()))?;
+    file.write_all(private_key.as_bytes())
+        .map_err(|_| EvmError::OperationError("Not able to write into wallet".to_string()))?;
+    Ok(file_dir_path.to_string_lossy().to_string())
+}
+
+pub fn wallet_encryption_status(root_dir: &Path) -> bool {
+    let wallet_file_path = root_dir.join(ENCRYPTED_MAIN_SECRET_KEY_FILENAME);
+    wallet_file_path.is_file()
+}
+
+pub fn wallet_encryption_storage(dir_path: &str, content: &str) -> Result<String, EvmError> {
+    // ensure the directory exists
+    fs::create_dir_all(dir_path)
+        .map_err(|_| EvmError::OperationError("Not able to create the directory".to_string()))?;
+    let file_path = format!("{}/{}", dir_path, ENCRYPTED_MAIN_SECRET_KEY_FILENAME);
+
+    let mut file = File::create(&file_path)
+        .map_err(|_| EvmError::OperationError("Not able to create the file".to_string()))?;
+    file.write_all(content.as_bytes())
+        .map_err(|_| EvmError::OperationError("Not able to write into the file".to_string()))?;
+    let file_path = Path::new(&file_path).canonicalize().map_err(|_| {
+        EvmError::OperationError("Not able to get the full path of the wallet".to_string())
+    })?;
+    Ok(file_path.to_string_lossy().to_string())
+}
+
+pub fn prompt_the_user_for_password() -> Option<String> {
+    println!("Please enter the password: ");
+    let pwd = match read_password() {
+        Ok(pwd) => pwd,
+        Err(e) => {
+            eprintln!("Failed to read password: {}", e);
+            return None;
+        }
+    };
+    Some(pwd)
+}
+
+pub fn get_client_wallet_dir_path() -> Result<PathBuf> {
+    let mut home_dirs = dirs_next::data_dir()
+        .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?;
+    home_dirs.push("safe");
+    home_dirs.push("autonomi");
+    home_dirs.push("wallets");
+    std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?;
+    Ok(home_dirs)
+}
\ No newline at end of file

From aea309f21eefd7706e2857c5690aca4734fe2ecf Mon Sep 17 00:00:00 2001
From: Warm Beer
Date: Tue, 29 Oct 2024 13:31:00 +0100
Subject: [PATCH 125/128] chore: update ANT contract max supply for Anvil

---
 evmlib/artifacts/AutonomiNetworkToken.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/evmlib/artifacts/AutonomiNetworkToken.json b/evmlib/artifacts/AutonomiNetworkToken.json
index b075133e1c..841ed5d678 100644
--- a/evmlib/artifacts/AutonomiNetworkToken.json
+++ b/evmlib/artifacts/AutonomiNetworkToken.json
@@ -890,8 +890,8 @@
       "type": "function"
     }
   ],
-  "bytecode":
"0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b5565b506004620000d68282620009b5565b50620000e891508390506005620001c0565b61012052620000f9816006620001c0565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001ba33620001a46012600a62000b94565b620001b4906301312d0062000ba5565b620001f9565b62000cae565b6000602083511015620001e057620001d8836200023b565b9050620001f3565b81620001ed8482620009b5565b5060ff90505b92915050565b6001600160a01b038216620002295760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000237600083836200027e565b5050565b600080829050601f8151111562000269578260405163305a27a960e01b815260040162000220919062000bbf565b8051620002768262000c10565b179392505050565b6200028b83838362000290565b505050565b6200029d838383620002ff565b6001600160a01b038316620002f2576000620002b860025490565b90506001600160d01b0380821115620002ef57604051630e58ae9360e11b8152600481018390526024810182905260440162000220565b50505b6200028b83838362000432565b6001600160a01b0383166200032e57806002600082825462000322919062000c35565b90915550620003a29050565b6001600160a01b03831660009081526020819052604090205481811015620003835760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640162000220565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003c057600280548290039055620003df565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042591815260200190565b60405180910390a3505050565b6001600160a01b038316620004675762000464600a62000953620004ca60201b176200045e84620004df565b62000519565b50505b6001600160a01b038216620004965762000493600a6200095f6200055660201b176200045e84620004df565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028b9291821691168362000564565b6000620004d8828462000c4b565b9392505050565b60006001600160d01b0382111562000515576040516306dfcc6560e41b815260d060048201526024810183905260440162000220565b5090565b600080620005496200052a620006cb565b620005406200053988620006dc565b868860201c565b8791906200072b565b915091505b935093915050565b6000620004d8828462000c75565b816001600160a01b0316836001600160a01b031614158015620005875750600081115b156200028b576001600160a01b038316156200062a576001600160a01b038316600090815260096020908152604082208291620005d5919062000556901b6200095f176200045e86620004df565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bda83398151915283836040516200061f929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028b576001600160a01b038216600090815260096020908152604082208291620006729190620004ca901b62000953176200045e86620004df565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bda8339815191528383604051620006bc929190918252602082015260400190565b60405180910390a25050505050565b6000620006d76200073b565b905090565b80546000908015620007225762000708836200
06fa60018462000c98565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d8565b60009392505050565b6000806200054985858562000748565b6000620006d743620008da565b8254600090819080156200087b5760006200076a87620006fa60018562000c98565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c257604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff808816911603620008165784620007e988620006fa60018662000c98565b80546001600160d01b039290921666010000000000000265ffffffffffff9092169190911790556200086a565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054e9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054e565b600065ffffffffffff82111562000515576040516306dfcc6560e41b8152603060048201526024810183905260440162000220565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093a57607f821691505b6020821081036200095b57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028b576000816000526020600020601f850160051c810160208610156200098c5750805b601f850160051c820191505b81811015620009ad5782815560010162000998565b505050505050565b81516001600160401b03811115620009d157620009d16200090f565b620009e981620009e2845462000925565b8462000961565b602080601f83116001811462000a21576000841562000a085750858301515b600019600386901b1c1916600185901b178555620009ad565b600085815260208120601f198616915b8281101562000a525788860151825594840194600190910190840162000a31565b508582101562000a715787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad857816000190482111562000abc5762000abc62000a81565b8085161562000aca57918102915b93841c939080029062000a9c565b509250929050565b60008262000af157506001620001f3565b8162000b0057506000620001f3565b816001811462000b19576002811462000b245762000b44565b6001915050620001f3565b60ff84111562000b385762000b3862000a81565b50506001821b620001f3565b5060208310610133831016604e8410600b841016171562000b69575081810a620001f3565b62000b75838362000a97565b806000190482111562000b8c5762000b8c62000a81565b029392505050565b6000620004d860ff84168362000ae0565b8082028115828204841417620001f357620001f362000a81565b60006020808352835180602085015260005b8181101562000bef5785810183015185820160400152820162000bd1565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095b5760001960209190910360031b1b16919050565b80820180821115620001f357620001f362000a81565b6001600160d01b0381811683821601908082111562000c6e5762000c6e62000a81565b5092915050565b6001600160d01b0382811682821603908082111562000c6e5762000c6e62000a81565b81810381811115620001f357620001f362000a81565b60805160a05160c05160e051610100516101205161014051611ed162000d096000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032
157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a0016040516020818303038152906040528051906020012061
0df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526
001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b81526004810183
90526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b6
00080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", - "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f16
8201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905
090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190
918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b0316602084015291925090871
6101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033", + "bytecode": 
"0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b4565b506004620000d68282620009b4565b50620000e891508390506005620001bf565b61012052620000f9816006620001bf565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001b933620001a46012600a62000b93565b620001b390622625a062000ba4565b620001f8565b62000cad565b6000602083511015620001df57620001d7836200023a565b9050620001f2565b81620001ec8482620009b4565b5060ff90505b92915050565b6001600160a01b038216620002285760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000236600083836200027d565b5050565b600080829050601f8151111562000268578260405163305a27a960e01b81526004016200021f919062000bbe565b8051620002758262000c0f565b179392505050565b6200028a8383836200028f565b505050565b6200029c838383620002fe565b6001600160a01b038316620002f1576000620002b760025490565b90506001600160d01b0380821115620002ee57604051630e58ae9360e11b815260048101839052602481018290526044016200021f565b50505b6200028a83838362000431565b6001600160a01b0383166200032d57806002600082825462000321919062000c34565b90915550620003a19050565b6001600160a01b03831660009081526020819052604090205481811015620003825760405163391434e360e21b81526001600160a01b038516600482015260248101829052604481018390526064016200021f565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003bf57600280548290039055620003de565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042491815260200190565b60405180910390a3505050565b6001600160a01b038316620004665762000463600a62000953620004c960201b176200045d84620004de565b62000518565b50505b6001600160a01b038216620004955762000492600a6200095f6200055560201b176200045d84620004de565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028a9291821691168362000563565b6000620004d7828462000c4a565b9392505050565b60006001600160d01b0382111562000514576040516306dfcc6560e41b815260d06004820152602481018390526044016200021f565b5090565b6000806200054862000529620006ca565b6200053f6200053888620006db565b868860201c565b8791906200072a565b915091505b935093915050565b6000620004d7828462000c74565b816001600160a01b0316836001600160a01b031614158015620005865750600081115b156200028a576001600160a01b0383161562000629576001600160a01b038316600090815260096020908152604082208291620005d4919062000555901b6200095f176200045d86620004de565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bd983398151915283836040516200061e929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028a576001600160a01b038216600090815260096020908152604082208291620006719190620004c9901b62000953176200045d86620004de565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bd98339815191528383604051620006bb929190918252602082015260400190565b60405180910390a25050505050565b6000620006d66200073a565b905090565b8054600090801562000721576200070783620006
f960018462000c97565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d7565b60009392505050565b6000806200054885858562000747565b6000620006d643620008d9565b8254600090819080156200087a5760006200076987620006f960018562000c97565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c157604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff808816911603620008155784620007e888620006f960018662000c97565b80546001600160d01b039290921666010000000000000265ffffffffffff90921691909117905562000869565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054d9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054d565b600065ffffffffffff82111562000514576040516306dfcc6560e41b815260306004820152602481018390526044016200021f565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093957607f821691505b6020821081036200095a57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028a576000816000526020600020601f850160051c810160208610156200098b5750805b601f850160051c820191505b81811015620009ac5782815560010162000997565b505050505050565b81516001600160401b03811115620009d057620009d06200090e565b620009e881620009e1845462000924565b8462000960565b602080601f83116001811462000a20576000841562000a075750858301515b600019600386901b1c1916600185901b178555620009ac565b600085815260208120601f198616915b8281101562000a515788860151825594840194600190910190840162000a30565b508582101562000a705787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad757816000190482111562000abb5762000abb62000a80565b8085161562000ac957918102915b93841c939080029062000a9b565b509250929050565b60008262000af057506001620001f2565b8162000aff57506000620001f2565b816001811462000b18576002811462000b235762000b43565b6001915050620001f2565b60ff84111562000b375762000b3762000a80565b50506001821b620001f2565b5060208310610133831016604e8410600b841016171562000b68575081810a620001f2565b62000b74838362000a96565b806000190482111562000b8b5762000b8b62000a80565b029392505050565b6000620004d760ff84168362000adf565b8082028115828204841417620001f257620001f262000a80565b60006020808352835180602085015260005b8181101562000bee5785810183015185820160400152820162000bd0565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095a5760001960209190910360031b1b16919050565b80820180821115620001f257620001f262000a80565b6001600160d01b0381811683821601908082111562000c6d5762000c6d62000a80565b5092915050565b6001600160d01b0382811682821603908082111562000c6d5762000c6d62000a80565b81810381811115620001f257620001f262000a80565b60805160a05160c05160e051610100516101205161014051611ed162000d086000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf4146103215
7600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610d
f8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b6040805180820190915260008082526020820152600
1600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390
526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600
080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220e6593b092e1a24b35f83124c9f1435eef683cc4ae5be2f7a133072dc046158f264736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f1682
01915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c0016040516020818303038152906040528051906020012090509
0565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724838360405161128192919091
8252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b031660208401529192509087161
01561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220e6593b092e1a24b35f83124c9f1435eef683cc4ae5be2f7a133072dc046158f264736f6c63430008180033", "linkReferences": {}, "deployedLinkReferences": {} -} +} \ No newline at end of file From e145bb17ce29b6b3e2e803e9f2a00b544d2f03af Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 28 Oct 2024 11:57:26 +0100 Subject: [PATCH 126/128] feat(cli): multi-wallet support & select wallet from disk --- Cargo.lock | 65 ++++++++- autonomi-cli/Cargo.toml | 5 + autonomi-cli/src/access/data_dir.rs | 2 +- autonomi-cli/src/access/keys.rs | 32 ++-- autonomi-cli/src/commands.rs | 34 ++--- autonomi-cli/src/commands/file.rs | 3 +- autonomi-cli/src/commands/register.rs | 3 +- autonomi-cli/src/commands/vault.rs | 5 +- autonomi-cli/src/commands/wallet.rs | 135 ++++++++--------- autonomi-cli/src/main.rs | 1 + autonomi-cli/src/wallet/encryption.rs | 171 ++++++++++++++++++++++ 
autonomi-cli/src/wallet/error.rs | 31 ++++ autonomi-cli/src/wallet/fs.rs | 202 ++++++++++++++++++++++++++ autonomi-cli/src/wallet/input.rs | 68 +++++++++ autonomi-cli/src/wallet/mod.rs | 42 ++++++ autonomi/src/lib.rs | 1 - autonomi/src/wallet.rs | 139 ------------------ evmlib/src/utils.rs | 6 +- evmlib/src/wallet.rs | 12 +- sn_evm/Cargo.toml | 4 - sn_evm/src/encryption.rs | 134 ----------------- sn_evm/src/error.rs | 10 -- sn_evm/src/lib.rs | 2 - sn_evm/src/wallet.rs | 103 ------------- 24 files changed, 682 insertions(+), 528 deletions(-) create mode 100644 autonomi-cli/src/wallet/encryption.rs create mode 100644 autonomi-cli/src/wallet/error.rs create mode 100644 autonomi-cli/src/wallet/fs.rs create mode 100644 autonomi-cli/src/wallet/input.rs create mode 100644 autonomi-cli/src/wallet/mod.rs delete mode 100644 autonomi/src/wallet.rs delete mode 100644 sn_evm/src/encryption.rs delete mode 100644 sn_evm/src/wallet.rs diff --git a/Cargo.lock b/Cargo.lock index e04a254cbf..95fe9c532a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1128,12 +1128,16 @@ dependencies = [ "autonomi", "clap", "color-eyre", + "const-hex", "criterion", "dirs-next", "eyre", + "hex 0.4.3", "indicatif", + "prettytable", "rand 0.8.5", "rayon", + "ring 0.17.8", "rpassword", "serde", "serde_json", @@ -1918,7 +1922,7 @@ version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ - "encode_unicode", + "encode_unicode 0.3.6", "lazy_static", "libc", "unicode-width", @@ -1937,9 +1941,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if", "cpufeatures", @@ -2215,6 +2219,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "ctr" version = "0.9.2" @@ -2708,6 +2733,12 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "encoding_rs" version = "0.8.34" @@ -6526,6 +6557,20 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettytable" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46480520d1b77c9a3482d39939fcf96831537a250ec62d4fd8fbdf8e0302e781" +dependencies = [ + "csv", + "encode_unicode 1.0.0", + "is-terminal", + "lazy_static", + "term", + "unicode-width", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -8222,9 +8267,7 @@ dependencies = [ name = "sn_evm" version = "0.1.2-rc.2" dependencies = [ - "color-eyre", "custom_debug", - "dirs-next", "evmlib", 
"hex 0.4.3", "lazy_static", @@ -8232,7 +8275,6 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rmp-serde", - "rpassword", "serde", "serde_json", "tempfile", @@ -8818,6 +8860,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "terminal_size" version = "0.3.0" diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 25be46525c..aed30589b5 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -33,9 +33,12 @@ autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" +const-hex = "1.13.1" dirs-next = "~2.0.0" +prettytable = "0.10.0" thiserror = "1.0" indicatif = { version = "0.17.5", features = ["tokio"] } +rand = { version = "~0.8.5", features = ["small_rng"] } rpassword = "7.0" tokio = { version = "1.32.0", features = [ "io-util", @@ -53,6 +56,8 @@ sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } walkdir = "2.5.0" serde_json = "1.0.132" serde = "1.0.210" +hex = "0.4.3" +ring = "0.17.8" [dev-dependencies] autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ diff --git a/autonomi-cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs index c934ee7b16..9233507264 100644 --- a/autonomi-cli/src/access/data_dir.rs +++ b/autonomi-cli/src/access/data_dir.rs @@ -25,4 +25,4 @@ pub fn get_client_data_dir_path() -> Result { ) })?; Ok(home_dirs) -} \ No newline at end of file +} diff --git a/autonomi-cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs index ecdc5aee10..cfaa5284b7 100644 --- a/autonomi-cli/src/access/keys.rs +++ b/autonomi-cli/src/access/keys.rs @@ -6,10 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use crate::wallet::load_wallet_private_key; use autonomi::client::registers::RegisterSecretKey; use autonomi::client::vault::VaultSecretKey; use autonomi::{get_evm_network_from_env, Wallet}; -use color_eyre::eyre::{Context, Result}; +use color_eyre::eyre::{eyre, Context, Result}; use color_eyre::Section; use std::env; use std::fs; @@ -18,13 +19,12 @@ use std::path::PathBuf; const SECRET_KEY_ENV: &str = "SECRET_KEY"; const REGISTER_SIGNING_KEY_ENV: &str = "REGISTER_SIGNING_KEY"; -const SECRET_KEY_FILE: &str = "secret_key"; const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key"; /// EVM wallet -pub fn load_evm_wallet() -> Result { +pub fn load_evm_wallet_from_env() -> Result { let secret_key = - get_secret_key().wrap_err("The secret key is required to perform this action")?; + get_secret_key_from_env().wrap_err("The secret key is required to perform this action")?; let network = get_evm_network_from_env()?; let wallet = Wallet::new_from_private_key(network, &secret_key) .wrap_err("Failed to load EVM wallet from key")?; @@ -32,28 +32,14 @@ pub fn load_evm_wallet() -> Result { } /// EVM wallet private key -pub fn get_secret_key() -> Result { - // try env var first - let why_env_failed = match env::var(SECRET_KEY_ENV) { - Ok(key) => return Ok(key), - Err(e) => e, - }; - - // try from data dir - let dir = super::data_dir::get_client_data_dir_path() - .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var"))?; - - // load the key from file - let key_path = dir.join(SECRET_KEY_FILE); - fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file") - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var or have the key in a file at {key_path:?}")) - .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key") +pub fn get_secret_key_from_env() -> Result { + env::var(SECRET_KEY_ENV).wrap_err(eyre!( + "make sure you've provided the {SECRET_KEY_ENV} env var" + )) } pub fn get_vault_secret_key() -> Result { - let secret_key = get_secret_key()?; + let secret_key = load_wallet_private_key()?; autonomi::client::vault::derive_vault_key(&secret_key) .wrap_err("Failed to derive vault secret key from EVM secret key") } diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index 6bce261b14..663898b6ea 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -40,7 +40,7 @@ pub enum SubCmd { Wallet { #[command(subcommand)] command: WalletCmd, - } + }, } #[derive(Subcommand, Debug)] @@ -154,19 +154,20 @@ pub enum VaultCmd { #[derive(Subcommand, Debug)] pub enum WalletCmd { - /// Create a wallet - // #[command(subcommand)] + /// Create a wallet. Create { - #[arg(long)] - encrypt: Option, - #[arg(long)] - password: Option, - #[arg(long)] + /// Optional flag to not add a password. + #[clap(long, action)] + no_password: bool, + /// Optional hex-encoded private key. + #[clap(long)] private_key: Option, - + /// Optional password to encrypt the wallet with. + #[clap(long, short)] + password: Option, }, - /// Check the balance of the wallet + /// Check the balance of the wallet. 
Balance, } @@ -207,14 +208,11 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { }, SubCmd::Wallet { command } => match command { WalletCmd::Create { - encrypt, - password, + no_password, private_key, - - } => { - wallet::create( encrypt, password,private_key) - }, - WalletCmd::Balance => Ok(wallet::balance()?), - } + password, + } => wallet::create(no_password, private_key, password), + WalletCmd::Balance => Ok(wallet::balance().await?), + }, } } diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index e32b98b51d..6d3f051015 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::utils::collect_upload_summary; +use crate::wallet::load_wallet; use autonomi::client::address::addr_to_str; use autonomi::Multiaddr; use color_eyre::eyre::Context; @@ -31,7 +32,7 @@ pub async fn cost(file: &str, peers: Vec) -> Result<()> { } pub async fn upload(file: &str, public: bool, peers: Vec) -> Result<()> { - let wallet = crate::keys::load_evm_wallet()?; + let wallet = load_wallet()?; let mut client = crate::actions::connect_to_network(peers).await?; let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); diff --git a/autonomi-cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs index 0fdd9437ea..0aad3ab844 100644 --- a/autonomi-cli/src/commands/register.rs +++ b/autonomi-cli/src/commands/register.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::utils::collect_upload_summary; +use crate::wallet::load_wallet; use autonomi::client::registers::RegisterAddress; use autonomi::client::registers::RegisterPermissions; use autonomi::client::registers::RegisterSecretKey; @@ -51,7 +52,7 @@ pub async fn cost(name: &str, peers: Vec) -> Result<()> { } pub async fn create(name: &str, value: &str, public: bool, peers: Vec) -> Result<()> { - let wallet = crate::keys::load_evm_wallet()?; + let wallet = load_wallet()?; let register_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let mut client = crate::actions::connect_to_network(peers).await?; diff --git a/autonomi-cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs index 60c0c8192f..e7ce3f95c8 100644 --- a/autonomi-cli/src/commands/vault.rs +++ b/autonomi-cli/src/commands/vault.rs @@ -6,6 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use crate::wallet::load_wallet; use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; @@ -28,7 +29,7 @@ pub async fn cost(peers: Vec) -> Result<()> { pub async fn create(peers: Vec) -> Result<()> { let client = crate::actions::connect_to_network(peers).await?; - let wallet = crate::keys::load_evm_wallet()?; + let wallet = load_wallet()?; let vault_sk = crate::keys::get_vault_secret_key()?; println!("Retrieving local user data..."); @@ -59,7 +60,7 @@ pub async fn create(peers: Vec) -> Result<()> { pub async fn sync(peers: Vec, force: bool) -> Result<()> { let client = crate::actions::connect_to_network(peers).await?; let vault_sk = crate::keys::get_vault_secret_key()?; - let wallet = crate::keys::load_evm_wallet()?; + let wallet = load_wallet()?; println!("Fetching vault from network..."); let net_user_data = client diff --git a/autonomi-cli/src/commands/wallet.rs b/autonomi-cli/src/commands/wallet.rs index 8dcfb25802..3b31a873b2 100644 --- a/autonomi-cli/src/commands/wallet.rs +++ b/autonomi-cli/src/commands/wallet.rs @@ -6,95 +6,80 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. - -use autonomi::wallet::*; +use crate::wallet::fs::{select_wallet, store_private_key}; +use crate::wallet::input::request_password; +use crate::wallet::DUMMY_NETWORK; +use autonomi::Wallet; +use color_eyre::eyre::eyre; use color_eyre::Result; -use rpassword::read_password; - - -pub fn process_password(encryption: Option, password: Option) -> Option { - match encryption { - Some(value) => { - if !(value == "Y" || value == "y" || value == "Yes" || value == "YES" || value == "yes") { - println!("value: {}", value); - return None; - } - match password { - Some(passw) => { - return Some(passw); - } - None => { - //prompt for the password - println!("Please enter the Password"); - let input_password = read_password(); - let pwd = match input_password { - Ok(pwd) => pwd, - Err(e) => { - eprintln!("Failed to read password: {}",e); - println!("Try again..."); - panic!("issue with password"); - } - }; - Some(pwd) - } - } - } - _ => None, +use prettytable::{Cell, Row, Table}; + +const WALLET_PASSWORD_REQUIRED: bool = false; + +pub fn create( + no_password: bool, + private_key: Option, + password: Option, +) -> Result<()> { + if no_password && password.is_some() { + return Err(eyre!( + "Only one of `--no-password` or `--password` may be specified" + )); } -} + // Set a password for encryption or not + let encryption_password: Option = match (no_password, password) { + (true, _) => None, + (false, Some(pass)) => Some(pass.to_owned()), + (false, None) => request_password(WALLET_PASSWORD_REQUIRED), + }; + let wallet_private_key = if let Some(private_key) = private_key { + // Validate imported key + Wallet::new_from_private_key(DUMMY_NETWORK, &private_key) + .map_err(|_| eyre!("Please provide a valid secret key in hex format"))?; -pub fn create(encryption: Option, password: Option, private_key: Option) -> Result<()>{ - let pass = process_password(encryption, password); + private_key + } else { + // Create a new key + Wallet::random_private_key() + }; - match private_key { - Some(priv_key) => { - import_new_wallet(priv_key, pass) - }, - None => create_new_wallet(pass), - } -} + let wallet_address = Wallet::new_from_private_key(DUMMY_NETWORK, &wallet_private_key) + .expect("Infallible") + .address() + .to_string(); -pub fn import_new_wallet(private_key: String, 
encryption: Option) -> Result<()> { - let mut file_path = import_evm_wallet(private_key); + // Save the private key file + let file_path = store_private_key(&wallet_private_key, encryption_password)?; - if let Some(passw) = encryption { - file_path = encrypt_evm_wallet(file_path?, passw); - } + println!("Wallet address: {wallet_address}"); + println!("Stored wallet in: {file_path:?}"); - println!("The wallet is imported here: {}", file_path?); Ok(()) } -pub fn create_new_wallet(encryption: Option) -> Result<()> { - let mut file_path = create_evm_wallet(); - if let Some(passw) = encryption { - file_path = encrypt_evm_wallet(file_path?, passw); - } +pub async fn balance() -> Result<()> { + let wallet = select_wallet()?; - println!("The wallet is created here: {}", file_path?); - Ok(()) -} + let token_balance = wallet.balance_of_tokens().await?; + let gas_balance = wallet.balance_of_gas_tokens().await?; -pub fn balance() -> Result<()> { - // list_available_public_wallets - // Call the function to get numbered file names as a HashMap - let get_client_data_dir_path = get_wallet_directory(); - - let files = get_numbered_files(get_client_data_dir_path)?; - - let mut sorted_files: Vec<(&u32, &(String,String))> = files.iter().collect(); - sorted_files.sort_by_key(|&(key, _)| key); - // Print the HashMap - for (key, value) in sorted_files { - println!("{}: - {} - {}", key, value.0, value.1); - } + println!("Wallet balances: {}", wallet.address()); - let key = prompt_for_key()?; + let mut table = Table::new(); + + table.add_row(Row::new(vec![ + Cell::new("Token Balance"), + Cell::new(&token_balance.to_string()), + ])); + + table.add_row(Row::new(vec![ + Cell::new("Gas Balance"), + Cell::new(&gas_balance.to_string()), + ])); + + table.printstd(); - if let Ok(private_key) = get_private_key_from_wallet(key, files){ - get_wallet_information(private_key); - } Ok(()) -} \ No newline at end of file +} diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs index 0953d81d1d..f86d74f484 100644 --- a/autonomi-cli/src/main.rs +++ b/autonomi-cli/src/main.rs @@ -14,6 +14,7 @@ mod actions; mod commands; mod opt; mod utils; +mod wallet; pub use access::data_dir; pub use access::keys; diff --git a/autonomi-cli/src/wallet/encryption.rs b/autonomi-cli/src/wallet/encryption.rs new file mode 100644 index 0000000000..bc673574ce --- /dev/null +++ b/autonomi-cli/src/wallet/encryption.rs @@ -0,0 +1,171 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use crate::wallet::error::Error; +use rand::Rng; +use ring::aead::{BoundKey, Nonce, NonceSequence}; +use ring::error::Unspecified; +use std::num::NonZeroU32; +use std::sync::LazyLock; + +const SALT_LENGTH: usize = 8; +const NONCE_LENGTH: usize = 12; + +/// Number of iterations for pbkdf2. 
+static ITERATIONS: LazyLock<NonZeroU32> =
+    LazyLock::new(|| NonZeroU32::new(100_000).expect("Infallible"));
+
+struct NonceSeq([u8; 12]);
+
+impl NonceSequence for NonceSeq {
+    fn advance(&mut self) -> Result<Nonce, Unspecified> {
+        Nonce::try_assume_unique_for_key(&self.0)
+    }
+}
+
+pub fn encrypt_private_key(private_key: &str, password: &str) -> Result<String, Error> {
+    // Generate a random salt
+    // Salt is used to ensure unique derived keys even for identical passwords
+    let mut salt = [0u8; SALT_LENGTH];
+    rand::thread_rng().fill(&mut salt);
+
+    // Generate a random nonce
+    // Nonce is used to ensure unique encryption outputs even for identical inputs
+    let mut nonce = [0u8; NONCE_LENGTH];
+    rand::thread_rng().fill(&mut nonce);
+
+    let mut key = [0; 32];
+
+    // Derive a key from the password using PBKDF2 with HMAC
+    // PBKDF2 is used for key derivation to mitigate brute-force attacks by making key derivation computationally expensive
+    // HMAC is used as the pseudorandom function for its security properties
+    ring::pbkdf2::derive(
+        ring::pbkdf2::PBKDF2_HMAC_SHA512,
+        *ITERATIONS,
+        &salt,
+        password.as_bytes(),
+        &mut key,
+    );
+
+    // Create an unbound key using CHACHA20_POLY1305 algorithm
+    // CHACHA20_POLY1305 is a fast and secure AEAD (Authenticated Encryption with Associated Data) algorithm
+    let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+        .map_err(|_| Error::FailedToEncryptKey(String::from("Could not create unbound key")))?;
+
+    // Create a sealing key with the unbound key and nonce
+    let mut sealing_key = ring::aead::SealingKey::new(unbound_key, NonceSeq(nonce));
+    let aad = ring::aead::Aad::from(&[]);
+
+    // Convert the secret key to bytes
+    let private_key_bytes = String::from(private_key).into_bytes();
+    let mut encrypted_private_key = private_key_bytes;
+
+    // seal_in_place_append_tag encrypts the data and appends an authentication tag to ensure data integrity
+    sealing_key
+        .seal_in_place_append_tag(aad, &mut encrypted_private_key)
+        .map_err(|_| Error::FailedToEncryptKey(String::from("Could not seal sealing key")))?;
+
+    let mut encrypted_data = Vec::new();
+    encrypted_data.extend_from_slice(&salt);
+    encrypted_data.extend_from_slice(&nonce);
+    encrypted_data.extend_from_slice(&encrypted_private_key);
+
+    // Return the encrypted secret key along with salt and nonce encoded as hex strings
+    Ok(hex::encode(encrypted_data))
+}
+
+pub fn decrypt_private_key(encrypted_data: &str, password: &str) -> Result<String, Error> {
+    let encrypted_data = hex::decode(encrypted_data)
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Encrypted data is invalid")))?;
+
+    let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH]
+        .try_into()
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find salt")))?;
+
+    let nonce: [u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH]
+        .try_into()
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not find nonce")))?;
+
+    let encrypted_private_key = &encrypted_data[SALT_LENGTH + NONCE_LENGTH..];
+
+    let mut key = [0; 32];
+
+    // Reconstruct the key from salt and password
+    ring::pbkdf2::derive(
+        ring::pbkdf2::PBKDF2_HMAC_SHA512,
+        *ITERATIONS,
+        &salt,
+        password.as_bytes(),
+        &mut key,
+    );
+
+    // Create an unbound key from the previously reconstructed key
+    let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key)
+        .map_err(|_| Error::FailedToDecryptKey(String::from("Could not create unbound key")))?;
+
+    // Create an opening key using the unbound key and original nonce
+    let mut opening_key = ring::aead::OpeningKey::new(unbound_key, NonceSeq(nonce));
+    let aad = ring::aead::Aad::from(&[]);
+
+    let mut encrypted_private_key = encrypted_private_key.to_vec();
+
+    // Decrypt the encrypted secret key bytes
+    let decrypted_data = opening_key
+        .open_in_place(aad, &mut encrypted_private_key)
+        .map_err(|_| {
+            Error::FailedToDecryptKey(String::from(
+                "Could not open encrypted key, please check the password",
+            ))
+        })?;
+
+    let mut private_key_bytes = [0u8; 66];
+    private_key_bytes.copy_from_slice(&decrypted_data[0..66]);
+
+    // Create the private key string from the decrypted bytes
+    Ok(String::from_utf8(private_key_bytes.to_vec()).expect("not able to convert private key"))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use autonomi::Wallet;
+
+    #[test]
+    fn test_encrypt_decrypt_private_key() {
+        let key = Wallet::random_private_key();
+        let password = "password123".to_string();
+
+        let encrypted_key =
+            encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+        let decrypted_key = decrypt_private_key(&encrypted_key, &password)
+            .expect("Failed to decrypt the private key");
+
+        assert_eq!(
+            decrypted_key, key,
+            "Decrypted key does not match the original private key"
+        );
+    }
+
+    #[test]
+    fn test_wrong_password() {
+        let key = Wallet::random_private_key();
+        let password = "password123".to_string();
+
+        let encrypted_key =
+            encrypt_private_key(&key, &password).expect("Failed to encrypt the private key");
+
+        let wrong_password = "password456".to_string();
+        let result = decrypt_private_key(&encrypted_key, &wrong_password);
+
+        assert!(
+            result.is_err(),
+            "Decryption should not succeed with a wrong password"
+        );
+    }
+}
diff --git a/autonomi-cli/src/wallet/error.rs b/autonomi-cli/src/wallet/error.rs
new file mode 100644
index 0000000000..b32455566d
--- /dev/null
+++ b/autonomi-cli/src/wallet/error.rs
@@ -0,0 +1,31 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Private key is invalid")]
+    InvalidPrivateKey,
+    #[error("Private key file is invalid")]
+    InvalidPrivateKeyFile,
+    #[error("Failed to encrypt private key: {0}")]
+    FailedToEncryptKey(String),
+    #[error("Failed to decrypt private key: {0}")]
+    FailedToDecryptKey(String),
+    #[error("Failed to write private key to disk: {0}")]
+    FailedToStorePrivateKey(String),
+    #[error("Failed to find wallets folder")]
+    WalletsFolderNotFound,
+    #[error("Failed to create wallets folder")]
+    FailedToCreateWalletsFolder,
+    #[error("Could not find private key file")]
+    PrivateKeyFileNotFound,
+    #[error("No wallets found. Create one using `wallet create`")]
+    NoWalletsFound,
+    #[error("Invalid wallet selection input")]
+    InvalidSelection,
+}
diff --git a/autonomi-cli/src/wallet/fs.rs b/autonomi-cli/src/wallet/fs.rs
new file mode 100644
index 0000000000..a467961016
--- /dev/null
+++ b/autonomi-cli/src/wallet/fs.rs
@@ -0,0 +1,202 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::wallet::encryption::{decrypt_private_key, encrypt_private_key};
+use crate::wallet::error::Error;
+use crate::wallet::input::{get_password_input, get_wallet_selection_input};
+use crate::wallet::DUMMY_NETWORK;
+use autonomi::{get_evm_network_from_env, RewardsAddress, Wallet};
+use const_hex::traits::FromHex;
+use prettytable::{Cell, Row, Table};
+use std::ffi::OsString;
+use std::io::Read;
+use std::path::PathBuf;
+use std::sync::OnceLock;
+
+const ENCRYPTED_PRIVATE_KEY_EXT: &str = ".encrypted";
+
+pub static SELECTED_WALLET_ADDRESS: OnceLock<String> = OnceLock::new();
+
+/// Creates the wallets folder if it is missing and returns the folder path.
+pub(crate) fn get_client_wallet_dir_path() -> Result<PathBuf, Error> {
+    let mut home_dirs = dirs_next::data_dir().ok_or(Error::WalletsFolderNotFound)?;
+    home_dirs.push("safe");
+    home_dirs.push("autonomi");
+    home_dirs.push("wallets");
+
+    std::fs::create_dir_all(home_dirs.as_path()).map_err(|_| Error::FailedToCreateWalletsFolder)?;
+
+    Ok(home_dirs)
+}
+
+/// Writes the private key (hex-encoded) to disk.
+///
+/// When a password is set, the private key file will be encrypted.
+pub(crate) fn store_private_key(
+    private_key: &str,
+    encryption_password: Option<String>,
+) -> Result<OsString, Error> {
+    let wallet = Wallet::new_from_private_key(DUMMY_NETWORK, private_key)
+        .map_err(|_| Error::InvalidPrivateKey)?;
+
+    // Wallet address
+    let wallet_address = wallet.address().to_string();
+    let wallets_folder = get_client_wallet_dir_path()?;
+
+    // If `encryption_password` is provided, the private key will be encrypted with the password.
+    // Else it will be saved as plain text.
+    if let Some(password) = encryption_password.as_ref() {
+        let encrypted_key = encrypt_private_key(private_key, password)?;
+        let file_name = format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}");
+        let file_path = wallets_folder.join(file_name);
+
+        std::fs::write(file_path.clone(), encrypted_key)
+            .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+        Ok(file_path.into_os_string())
+    } else {
+        let file_path = wallets_folder.join(wallet_address);
+
+        std::fs::write(file_path.clone(), private_key)
+            .map_err(|err| Error::FailedToStorePrivateKey(err.to_string()))?;
+
+        Ok(file_path.into_os_string())
+    }
+}
+
+/// Loads the private key (hex-encoded) from disk.
+///
+/// If the private key file is encrypted, the function will prompt for the decryption password in the CLI.
+pub(crate) fn load_private_key(wallet_address: &str) -> Result<String, Error> {
+    let wallets_folder = get_client_wallet_dir_path()?;
+
+    let mut file_name = wallet_address.to_string();
+
+    // Check if a file with the encrypted extension exists
+    let encrypted_file_path =
+        wallets_folder.join(format!("{wallet_address}{ENCRYPTED_PRIVATE_KEY_EXT}"));
+
+    let is_encrypted = encrypted_file_path.exists();
+
+    if is_encrypted {
+        file_name.push_str(ENCRYPTED_PRIVATE_KEY_EXT);
+    }
+
+    let file_path = wallets_folder.join(file_name);
+
+    let mut file = std::fs::File::open(&file_path).map_err(|_| Error::PrivateKeyFileNotFound)?;
+
+    let mut buffer = String::new();
+    file.read_to_string(&mut buffer)
+        .map_err(|_| Error::InvalidPrivateKeyFile)?;
+
+    // If the file is encrypted, prompt for the password and decrypt the key.
+    if is_encrypted {
+        let password = get_password_input("Enter password to decrypt wallet:");
+
+        decrypt_private_key(&buffer, &password)
+    } else {
+        Ok(buffer)
+    }
+}
+
+pub(crate) fn load_wallet_from_address(wallet_address: &str) -> Result<Wallet, Error> {
+    let network = get_evm_network_from_env().expect("Could not load EVM network from environment");
+    let private_key = load_private_key(wallet_address)?;
+    let wallet =
+        Wallet::new_from_private_key(network, &private_key).expect("Could not initialize wallet");
+    Ok(wallet)
+}
+
+pub(crate) fn select_wallet() -> Result<Wallet, Error> {
+    let wallet_address = select_wallet_address()?;
+    load_wallet_from_address(&wallet_address)
+}
+
+pub(crate) fn select_wallet_private_key() -> Result<String, Error> {
+    let wallet_address = select_wallet_address()?;
+    load_private_key(&wallet_address)
+}
+
+pub(crate) fn select_wallet_address() -> Result<String, Error> {
+    // Try if a wallet address was already selected this session
+    if let Some(wallet_address) = SELECTED_WALLET_ADDRESS.get() {
+        return Ok(wallet_address.clone());
+    }
+
+    let wallets_folder = get_client_wallet_dir_path()?;
+    let wallet_files = get_wallet_files(&wallets_folder)?;
+
+    let wallet_address = match wallet_files.len() {
+        0 => Err(Error::NoWalletsFound),
+        1 => Ok(filter_wallet_file_extension(&wallet_files[0])),
+        _ => get_wallet_selection(wallet_files),
+    }?;
+
+    Ok(SELECTED_WALLET_ADDRESS
+        .get_or_init(|| wallet_address)
+        .to_string())
+}
+
+fn get_wallet_selection(wallet_files: Vec<String>) -> Result<String, Error> {
+    list_wallets(&wallet_files);
+
+    let selected_index = get_wallet_selection_input("Select by index:")
+        .parse::<usize>()
+        .map_err(|_| Error::InvalidSelection)?;
+
+    if selected_index < 1 || selected_index > wallet_files.len() {
+        return Err(Error::InvalidSelection);
+    }
+
+    Ok(filter_wallet_file_extension(
+        &wallet_files[selected_index - 1],
+    ))
+}
+
+fn list_wallets(wallet_files: &[String]) {
+    println!("Wallets:");
+
+    let mut table = Table::new();
+
+    table.add_row(Row::new(vec![
+        Cell::new("Index"),
+        Cell::new("Address"),
+        Cell::new("Encrypted"),
+    ]));
+
+    for (index, wallet_file) in wallet_files.iter().enumerate() {
+        let encrypted = wallet_file.contains(ENCRYPTED_PRIVATE_KEY_EXT);
+
+        table.add_row(Row::new(vec![
+            Cell::new(&(index + 1).to_string()),
+            Cell::new(&filter_wallet_file_extension(wallet_file)),
+            Cell::new(&encrypted.to_string()),
+        ]));
+    }
+
+    table.printstd();
+}
+
+fn get_wallet_files(wallets_folder: &PathBuf) -> Result<Vec<String>, Error> {
+    let wallet_files = std::fs::read_dir(wallets_folder)
+        .map_err(|_| Error::WalletsFolderNotFound)?
+        .filter_map(Result::ok)
+        .filter_map(|dir_entry| dir_entry.file_name().into_string().ok())
+        .filter(|file_name| {
+            let cleaned_file_name = filter_wallet_file_extension(file_name);
+            RewardsAddress::from_hex(cleaned_file_name).is_ok()
+        })
+        .collect::<Vec<String>>();
+
+    Ok(wallet_files)
+}
+
+fn filter_wallet_file_extension(wallet_file: &str) -> String {
+    wallet_file.replace(ENCRYPTED_PRIVATE_KEY_EXT, "")
+}
diff --git a/autonomi-cli/src/wallet/input.rs b/autonomi-cli/src/wallet/input.rs
new file mode 100644
index 0000000000..94e3223cd8
--- /dev/null
+++ b/autonomi-cli/src/wallet/input.rs
@@ -0,0 +1,68 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+pub(crate) fn get_wallet_selection_input(prompt: &str) -> String {
+    println!("{prompt}");
+
+    let mut buffer = String::new();
+    let stdin = std::io::stdin();
+
+    if stdin.read_line(&mut buffer).is_err() {
+        // consider if error should process::exit(1) here
+        return "".to_string();
+    };
+
+    // Remove leading and trailing whitespace
+    buffer.trim().to_owned()
+}
+
+pub(crate) fn get_password_input(prompt: &str) -> String {
+    rpassword::prompt_password(prompt)
+        .map(|str| str.trim().into())
+        .unwrap_or_default()
+}
+
+pub(crate) fn confirm_password(password: &str) -> bool {
+    const MAX_RETRIES: u8 = 2;
+
+    for _ in 0..MAX_RETRIES {
+        if get_password_input("Repeat password: ") == password {
+            return true;
+        }
+        println!("Passwords do not match.");
+    }
+
+    false
+}
+
+pub(crate) fn request_password(required: bool) -> Option<String> {
+    let prompt = if required {
+        "Enter password: "
+    } else {
+        "Enter password (leave empty for none): "
+    };
+
+    loop {
+        let password = get_password_input(prompt);
+
+        if password.is_empty() {
+            if required {
+                println!("Password is required.");
+                continue;
+            }
+
+            return None;
+        }
+
+        if confirm_password(&password) {
+            return Some(password);
+        }
+
+        println!("Please set a new password.");
+    }
+}
diff --git a/autonomi-cli/src/wallet/mod.rs b/autonomi-cli/src/wallet/mod.rs
new file mode 100644
index 0000000000..b0dddfb889
--- /dev/null
+++ b/autonomi-cli/src/wallet/mod.rs
@@ -0,0 +1,42 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+ +use crate::keys::{get_secret_key_from_env, load_evm_wallet_from_env}; +use crate::wallet::fs::{select_wallet, select_wallet_private_key}; +use autonomi::{EvmNetwork, Wallet}; + +pub(crate) mod encryption; +pub(crate) mod error; +pub(crate) mod fs; +pub(crate) mod input; + +pub const DUMMY_NETWORK: EvmNetwork = EvmNetwork::ArbitrumSepolia; + +/// Load wallet from ENV or disk +pub(crate) fn load_wallet() -> color_eyre::Result { + // First try wallet from ENV + if let Ok(wallet) = load_evm_wallet_from_env() { + return Ok(wallet); + } + + let wallet = select_wallet()?; + + Ok(wallet) +} + +/// Load wallet private key from ENV or disk +pub(crate) fn load_wallet_private_key() -> color_eyre::Result { + // First try wallet private key from ENV + if let Ok(private_key) = get_secret_key_from_env() { + return Ok(private_key); + } + + let private_key = select_wallet_private_key()?; + + Ok(private_key) +} diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 0d8e18a318..c73bef1378 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -33,7 +33,6 @@ extern crate tracing; pub mod client; -pub mod wallet; #[cfg(feature = "data")] mod self_encryption; mod utils; diff --git a/autonomi/src/wallet.rs b/autonomi/src/wallet.rs deleted file mode 100644 index 302a1f5744..0000000000 --- a/autonomi/src/wallet.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::collections::HashMap; -use sn_evm::EvmError; - - // wallet_encryption_storage(&file_path, encrypted_private_key) -use sn_evm::wallet::{get_client_wallet_dir_path,prompt_the_user_for_password,create_a_evm_wallet, create_file_with_keys, get_gas_token_details, get_random_private_key, wallet_encryption_status, wallet_encryption_storage, ENCRYPTED_MAIN_SECRET_KEY_FILENAME}; - -use sn_evm::encryption::{decrypt_secret_key,encrypt_secret_key}; - -pub fn import_evm_wallet(wallet_private_key: String) -> Result { - let wallet_public_key = create_a_evm_wallet(&wallet_private_key)?; - - let file_path = create_file_with_keys(wallet_private_key, wallet_public_key)?; - Ok(file_path) -} - -pub fn get_wallet_information(private_key: String){ - let _ = get_gas_token_details(&private_key); -} - -pub fn create_evm_wallet() -> Result { - let wallet_private_key = get_random_private_key(); - let wallet_public_key = create_a_evm_wallet(&wallet_private_key)?; - let file_path = create_file_with_keys(wallet_private_key, wallet_public_key)?; - // println!("A file is created with the path: {}", file_path); - Ok(file_path) -} - -pub fn encrypt_evm_wallet(file_path: String, password: String) -> Result { - if wallet_encryption_status(Path::new(&file_path)) { - return Ok(String::from("Provided File is already encrypted")); //replace with error condition later. - } - - let private_key = std::fs::read_to_string(&file_path) - .map_err(|_| EvmError::OperationError("Not able to get the file dir path".to_string()))?; - - let encrypted_private_key = encrypt_secret_key(&private_key, &password)?; - //make the wallet a directory. 
- - if Path::new(&file_path).is_file() { - std::fs::remove_file(&file_path).expect("not able to remove the file"); - std::fs::create_dir(&file_path).expect("not able to create the directory"); - } - - wallet_encryption_storage(&file_path, &encrypted_private_key) -} - - - -pub fn get_private_key_from_wallet(key: u32, files: HashMap) -> Result { - - match files.get(&key) { - Some(value) => { - let mut wallet_directory = get_wallet_directory(); - wallet_directory.push(value.1.clone()); - if value.0 == "unprotected" { - let file_contents= std::fs::read(&wallet_directory); - if let Ok(file_data) = file_contents { - let private_key = String::from_utf8(file_data).map_err(|_| EvmError::OperationError("Not able to convert file contents".to_string()))?; - - return Ok(private_key); - } - } - - if value.0 =="passw-protected" { - let _ = wallet_directory.push(ENCRYPTED_MAIN_SECRET_KEY_FILENAME); - println!("encrypted wallet path: {:?}", wallet_directory); - let encrypted_bytes = std::fs::read(wallet_directory); - if let Ok(file_data) = encrypted_bytes { - let encrypted_private_key = String::from_utf8(file_data).map_err(|_| EvmError::OperationError("Not able to convert file contents".to_string()))?; - let password = prompt_the_user_for_password(); - match password { - Some(value) => { - println!("Recieved the password, initiating decryption!"); - let private_key = decrypt_secret_key(&encrypted_private_key, &value)?; - return Ok(private_key); - }, - None => { - println!("The provided Password is not proper, can not proceed further"); - } - } - - } - } - }, - None => { - println!("Provided Key doesn't exist try again"); - }, - } - let return_value:Option = None; - let value = return_value.ok_or(EvmError::OperationError("Not able to get the private key".to_string())); - value - -} - -pub fn get_numbered_files(dir: PathBuf) -> std::io::Result> { - let mut file_map:HashMap = std::collections::HashMap::new(); // Create a new HashMap to store the files - let entries = std::fs::read_dir(dir)?; // Get an iterator over directory entries - - // Iterate over the entries and insert them into the HashMap - for (index, entry) in entries.enumerate() { - let entry = entry?; // Unwrap the entry from Result - let mut path = entry.path(); // Get the path of the entry - - if let Some(name) = path.file_name() { - let file_name = name.to_string_lossy().into_owned(); // Convert to String - let mut wallet_details =None; - if path.is_file() { - wallet_details = Some((String::from("unprotected"), file_name)); - } else if path.is_dir() { - path.push(ENCRYPTED_MAIN_SECRET_KEY_FILENAME); - if path.is_file() { - wallet_details = Some((String::from("passw-protected"), file_name)); - } - } - if let Some(wallet_value) = wallet_details { - file_map.insert((index + 1) as u32, wallet_value); - } - // Insert into HashMap with number as key - } - } - Ok(file_map) -} - -// Function to prompt the user for a key -pub fn prompt_for_key() -> Result { - print!("Enter a key to retrieve the file: "); - std::io::stdout().flush().map_err(|_| EvmError::OperationError("Not able to flush out stdio".to_string()))?; - let mut input = String::new(); - std::io::stdin().read_line(&mut input).map_err(|_| EvmError::OperationError("Not able to read the line contents".to_string()))?; - - let input_value: u32 = input.trim().parse().map_err(|_| EvmError::OperationError("Not able to read the line contents".to_string()))?; - Ok(input_value) -} - -pub fn get_wallet_directory() -> PathBuf { - get_client_wallet_dir_path().expect("error") -} \ No newline at end of file 
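For orientation, the on-disk format produced by the new `encrypt_private_key` above is `hex(salt ‖ nonce ‖ ciphertext)`, with an 8-byte salt and a 12-byte nonce. A minimal sketch of splitting that blob back apart, assuming only the `hex` crate (the layout and constants mirror `encryption.rs`; `split_encrypted_blob` is a hypothetical helper for illustration, not code from this changeset):

```rust
// Hypothetical helper: split the hex blob written by `encrypt_private_key`
// back into (salt, nonce, ciphertext). Lengths mirror the constants in
// `autonomi-cli/src/wallet/encryption.rs`; only the `hex` crate is assumed.
const SALT_LENGTH: usize = 8;
const NONCE_LENGTH: usize = 12;

fn split_encrypted_blob(blob_hex: &str) -> Option<([u8; SALT_LENGTH], [u8; NONCE_LENGTH], Vec<u8>)> {
    let bytes = hex::decode(blob_hex).ok()?;
    // Salt and nonce must be present in full before any ciphertext.
    if bytes.len() < SALT_LENGTH + NONCE_LENGTH {
        return None;
    }
    let salt: [u8; SALT_LENGTH] = bytes[..SALT_LENGTH].try_into().ok()?;
    let nonce: [u8; NONCE_LENGTH] = bytes[SALT_LENGTH..SALT_LENGTH + NONCE_LENGTH]
        .try_into()
        .ok()?;
    let ciphertext = bytes[SALT_LENGTH + NONCE_LENGTH..].to_vec();
    Some((salt, nonce, ciphertext))
}
```

Storing the salt and nonce alongside the ciphertext is what lets `decrypt_private_key` re-derive the same PBKDF2 key and reconstruct the original nonce without keeping any extra state on disk.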
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 13c9b675c5..fbd838843f 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -98,9 +98,7 @@ pub fn get_evm_network_from_env() -> Result { .map(|v| v == "arbitrum-sepolia") .unwrap_or(false); - if use_local_evm { - local_evm_network_from_csv() - } else if use_arbitrum_one { + if use_arbitrum_one { info!("Using Arbitrum One EVM network as EVM_NETWORK is set to 'arbitrum-one'"); Ok(Network::ArbitrumOne) } else if use_arbitrum_sepolia { @@ -113,6 +111,8 @@ pub fn get_evm_network_from_env() -> Result { &evm_vars[1], &evm_vars[2], ))) + } else if use_local_evm { + local_evm_network_from_csv() } else { error!("Failed to obtain EVM Network through any means"); Err(Error::FailedToGetEvmNetwork( diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index e11a066131..22350b1ff4 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -12,6 +12,7 @@ use crate::contract::network_token::NetworkToken; use crate::contract::{data_payments, network_token}; use crate::utils::http_provider; use crate::Network; +use alloy::hex::ToHexExt; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; use alloy::providers::fillers::{ BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, @@ -148,6 +149,12 @@ impl Wallet { pub async fn lock(&self) -> tokio::sync::MutexGuard<()> { self.lock.lock().await } + + /// Returns a random private key string. + pub fn random_private_key() -> String { + let signer: PrivateKeySigner = LocalSigner::random(); + signer.to_bytes().encode_hex_with_prefix() + } } /// Generate an EthereumWallet with a random private key. @@ -192,11 +199,6 @@ pub fn wallet_address(wallet: &EthereumWallet) -> Address { >::default_signer_address(wallet) } -pub fn get_random_private_key_for_wallet() -> String { - let signer: PrivateKeySigner = LocalSigner::random(); - signer.to_bytes().to_string() -} - /// Returns the raw balance of payment tokens for this wallet. pub async fn balance_of_tokens( account: Address, diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index df1c9d74ff..27859ae7df 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -31,10 +31,6 @@ tracing = { version = "~0.1.26" } xor_name = "5.0.0" ring = "0.17.8" tempfile = "3.10.1" -tokio = { version = "1", features = ["full"] } -rpassword = "7.0" -color-eyre = "~0.6" -dirs-next = "~2.0.0" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros", "rt"] } diff --git a/sn_evm/src/encryption.rs b/sn_evm/src/encryption.rs deleted file mode 100644 index 134f13d93e..0000000000 --- a/sn_evm/src/encryption.rs +++ /dev/null @@ -1,134 +0,0 @@ -use rand::Rng; -use std::num::NonZeroU32; -use ring::aead::{BoundKey, Nonce, NonceSequence}; -use ring::error::Unspecified; -use crate::EvmError; -struct NonceSeq([u8; 12]); - -impl NonceSequence for NonceSeq { - fn advance(&mut self) -> std::result::Result { - Nonce::try_assume_unique_for_key(&self.0) - } -} - - -/// Number of iterations for pbkdf2. 
-const ITERATIONS: NonZeroU32 = match NonZeroU32::new(100_000) { - Some(v) => v, - None => panic!("`100_000` is not be zero"), -}; - -const SALT_LENGTH: usize = 8; -const NONCE_LENGTH: usize = 12; - -pub fn encrypt_secret_key( - secret_key: &str, - password: &str, -) -> Result { - // Generate a random salt - // Salt is used to ensure unique derived keys even for identical passwords - let mut salt = [0u8; SALT_LENGTH]; - rand::thread_rng().fill(&mut salt); - - // Generate a random nonce - // Nonce is used to ensure unique encryption outputs even for identical inputs - let mut nonce = [0u8; NONCE_LENGTH]; - rand::thread_rng().fill(&mut nonce); - - let mut key = [0; 32]; - - // Derive a key from the password using PBKDF2 with HMAC - // PBKDF2 is used for key derivation to mitigate brute-force attacks by making key derivation computationally expensive - // HMAC is used as the pseudorandom function for its security properties - ring::pbkdf2::derive( - ring::pbkdf2::PBKDF2_HMAC_SHA512, - ITERATIONS, - &salt, - password.as_bytes(), - &mut key, - ); - - // Create an unbound key using CHACHA20_POLY1305 algorithm - // CHACHA20_POLY1305 is a fast and secure AEAD (Authenticated Encryption with Associated Data) algorithm - let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key) - .map_err(|_| EvmError::FailedToEncryptKey(String::from("Could not create unbound key.")))?; - - // Create a sealing key with the unbound key and nonce - let mut sealing_key = ring::aead::SealingKey::new(unbound_key, NonceSeq(nonce)); - let aad = ring::aead::Aad::from(&[]); - - // Convert the secret key to bytes - let secret_key_bytes = String::from(secret_key).into_bytes(); - let mut encrypted_secret_key = secret_key_bytes; - - // seal_in_place_append_tag encrypts the data and appends an authentication tag to ensure data integrity - sealing_key - .seal_in_place_append_tag(aad, &mut encrypted_secret_key) - .map_err(|_| EvmError::FailedToEncryptKey(String::from("Could not seal sealing key.")))?; - - // encrypted_secret_key.extend_from_slice(&salt); - // encrypted_secret_key.extend_from_slice(&salt); - let mut encrypted_data = Vec::new(); - encrypted_data.extend_from_slice(&salt); - encrypted_data.extend_from_slice(&nonce); - encrypted_data.extend_from_slice(&encrypted_secret_key); - - // Return the encrypted secret key along with salt and nonce encoded as hex strings - Ok(hex::encode(encrypted_data)) -} - - -pub fn decrypt_secret_key( - encrypted_data: &str, - password: &str - ) -> Result { - - let encrypted_data = hex::decode(encrypted_data) - .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not seal sealing key.")))?; - let salt: [u8; SALT_LENGTH] = encrypted_data[..SALT_LENGTH] - .try_into().map_err(|_| EvmError::FailedToDecryptKey(String::from("could not process the hashed data.")))?; - let nonce:[u8; NONCE_LENGTH] = encrypted_data[SALT_LENGTH..SALT_LENGTH+NONCE_LENGTH] - .try_into().map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not process the hashed data")))?; - let encrypted_secretkey = &encrypted_data[SALT_LENGTH+ NONCE_LENGTH ..]; - - let mut key = [0; 32]; - - // Reconstruct the key from salt and password - ring::pbkdf2::derive( - ring::pbkdf2::PBKDF2_HMAC_SHA512, - ITERATIONS, - &salt, - password.as_bytes(), - &mut key, - ); - - // Create an unbound key from the previously reconstructed key - let unbound_key = ring::aead::UnboundKey::new(&ring::aead::CHACHA20_POLY1305, &key) - .map_err(|_| { - EvmError::FailedToDecryptKey(String::from("Could not create unbound 
key.")) - })?; - - - // Create an opening key using the unbound key and original nonce - let mut opening_key = ring::aead::OpeningKey::new(unbound_key, NonceSeq(nonce)); - let aad = ring::aead::Aad::from(&[]); - - // Convert the hex encoded and encrypted secret key to bytes - // let mut encrypted_secret_key = hex::decode(encrypted_secretkey).map_err(|_| { - // EvmError::FailedToDecryptKey(String::from("Invalid encrypted secret key encoding.")) - // }).expect("error"); - - let mut encrypted_secret_key = encrypted_secretkey.to_vec(); - // Decrypt the encrypted secret key bytes - let decrypted_data = opening_key - .open_in_place(aad, &mut encrypted_secret_key) - .map_err(|_| EvmError::FailedToDecryptKey(String::from("Could not open encrypted key, please check the password")))?; - - let mut secret_key_bytes = [0u8; 66]; - secret_key_bytes.copy_from_slice(&decrypted_data[0..66]); - - // Create secret key from decrypted byte - - Ok(String::from_utf8(secret_key_bytes.to_vec()).expect("not able to convert private key")) - -} \ No newline at end of file diff --git a/sn_evm/src/error.rs b/sn_evm/src/error.rs index 7086ed8180..afbd02a004 100644 --- a/sn_evm/src/error.rs +++ b/sn_evm/src/error.rs @@ -27,16 +27,6 @@ pub enum EvmError { NumericOverflow, #[error("Not enough balance, {0} available, {1} required")] NotEnoughBalance(AttoTokens, AttoTokens), - #[error("Invalid quote public key")] InvalidQuotePublicKey, - - #[error("Failed to encrypt secret key: {0}")] - FailedToEncryptKey(String), - - #[error("Failed to decrypt secret key: {0}")] - FailedToDecryptKey(String), - - #[error("Failed to process Operation: {0}")] - OperationError(String), } diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index 40b2b2a762..49956db39e 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -27,8 +27,6 @@ pub use evmlib::Network as EvmNetwork; mod amount; mod data_payments; mod error; -pub mod wallet; -pub mod encryption; pub use data_payments::{PaymentQuote, ProofOfPayment, QuotingMetrics}; diff --git a/sn_evm/src/wallet.rs b/sn_evm/src/wallet.rs deleted file mode 100644 index 1e41da8054..0000000000 --- a/sn_evm/src/wallet.rs +++ /dev/null @@ -1,103 +0,0 @@ -use evmlib::wallet::{get_random_private_key_for_wallet, Wallet}; -use evmlib::utils::get_evm_network_from_env; -use tokio::{runtime::Runtime, task}; -use std::fs::File; -use std::fs; -use std::io::Write; -use std::path::Path; -use rpassword::read_password; -use color_eyre::eyre::{eyre, Context, Result}; -use std::path::PathBuf; -use crate::EvmError; - -pub const ENCRYPTED_MAIN_SECRET_KEY_FILENAME: &str = "main_secret_key.encrypted"; - - - -pub fn get_random_private_key() -> String { - get_random_private_key_for_wallet() -} - -pub fn get_gas_token_details(private_key: &String) -> Result<(),EvmError>{ - - let network = get_evm_network_from_env() - .map_err(|_| EvmError::OperationError("Not able to create the Network".to_string()))?; - - - let wallet = Wallet::new_from_private_key(network, &private_key) - .map_err(|_| EvmError::OperationError("Not able to create the Wallet".to_string()))?; - - task::block_in_place(|| { - let rt = Runtime::new() - .map_err(|_| EvmError::OperationError("Not able to create tokio runtime for wallet operation".to_string())) - .expect("Not able to create the runtime"); - - rt.block_on(async { - match wallet.balance_of_gas_tokens().await { - Ok(balance) => println!("balance of gas tokens: {:?}", balance), - Err(e) => eprintln!("Error: {:?}", e), - } - match wallet.balance_of_tokens().await { - Ok(balance) => println!("balance of 
tokens: {:?}", balance), - Err(e) => eprintln!("Error: {:?}", e), - } - - }) - }); - Ok(()) -} - -pub fn create_a_evm_wallet(private_key: &String) -> Result { - let network = get_evm_network_from_env() - .map_err(|_| EvmError::OperationError("Not able to create the Network".to_string()))?; - let wallet = Wallet::new_from_private_key(network, &private_key) - .map_err(|_| EvmError::OperationError("Not able to get the wallet".to_string()))?; - Ok(hex::encode(wallet.address())) -} - -pub fn create_file_with_keys(private_key: String, public_key: String) -> Result { - let mut file_dir_path = get_client_wallet_dir_path() - .map_err(|_| EvmError::OperationError("Not able to get the file dir path".to_string()))?; - file_dir_path.push(public_key); - let mut file = File::create(&file_dir_path).map_err(|_| EvmError::OperationError("Not able to create the wallet file".to_string()))?; - file.write_all(private_key.as_bytes()).map_err(|_| EvmError::OperationError("Not able to write into wallet".to_string()))?; - Ok(file_dir_path.to_string_lossy().to_string()) -} - -pub fn wallet_encryption_status(root_dir: &Path) -> bool { - let wallelt_file_path = root_dir.join(ENCRYPTED_MAIN_SECRET_KEY_FILENAME); - wallelt_file_path.is_file() -} - -pub fn wallet_encryption_storage(dir_path: &str, content: &str) -> Result { - // ensure the directory exists; - fs::create_dir_all(dir_path).map_err(|_| EvmError::OperationError("Not able to create the directory".to_string()))?; - let file_path = format!("{}/{}", dir_path, ENCRYPTED_MAIN_SECRET_KEY_FILENAME); - - let mut file = File::create(&file_path).map_err(|_| EvmError::OperationError("Not able to create the file".to_string()))?; - file.write_all(content.as_bytes()).map_err(|_| EvmError::OperationError("Not able to write into the file".to_string()))?; - let file_path = Path::new(&file_path).canonicalize().map_err(|_| EvmError::OperationError("Not able to get the full path of the wallet".to_string()))?; - Ok(file_path.to_string_lossy().to_string()) -} - -pub fn prompt_the_user_for_password() -> Option { - println!("Please enter the password: "); - let pwd = match read_password() { - Ok(pwd) => pwd, - Err(e) => { - eprintln!("Failed to read password: {}",e); - return None; - } - }; - Some(pwd) -} - -pub fn get_client_wallet_dir_path() -> Result { - let mut home_dirs = dirs_next::data_dir() - .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; - home_dirs.push("safe"); - home_dirs.push("autonomi"); - home_dirs.push("wallets"); - std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; - Ok(home_dirs) -} \ No newline at end of file From 5babd2682260503d7fec59759662846efc70d6d4 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 29 Oct 2024 15:27:06 +0000 Subject: [PATCH 127/128] docs: changelog for 2024.10.4.2 Also provide new keys. --- CHANGELOG.md | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 8 +++--- 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f4d77a9c6..d68be75785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,80 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-10-28 + +## Autonomi API/CLI + +#### Added + +- Private data support. +- Local user data support. +- Network Vault containing user data encrypted. +- Archives with Metadata. +- Prepaid upload support for data_put using receipts. 
+ +#### Changed + +- Contract token approval amount set to infinite before doing data payments. + +### Client + +#### Added + +- Expose APIs in WASM (e.g. archives, vault and user data within vault). +- Uploads are not run in parallel. +- Support for local wallets. +- Provide `wallet create` command. +- Provide `wallet balance` command. + +#### Changed + +- Take metadata from file system and add `uploaded` field for time of upload. + +#### Fixed + +- Make sure we use the new client path throughout the codebase + +### Network + +#### Added + +- Get range used for store cost and register queries. +- Re-enabled large_file_upload, memcheck, benchmark CI tests. + +#### Changed + +- Scratchpad modifications to support multiple data encodings. +- Registers are now merged at the network level, preventing failures during update and during + replication. +- Libp2p config and get range tweaks reduce intensity of operations. Brings down CPU usage + considerably. +- Libp2p’s native kad bootstrap interval introduced in 0.54.1 is intensive, and as we roll our own, + we significantly reduce the kad period to lighten the CPU load. +- Wipe node’s storage dir when restarting for new network + +#### Fixed + +- Fixes in networking code for WASM compatibility (replacing `std::time` with compatible + alternative). +- Event dropped errors should not happen if the event is not dropped. +- Reduce outdated connection pruning frequency. + +### Node Manager + +#### Fixed + +- Local node register is cleaned up when --clean flag applied (prevents some errors when register + changes). + +### Launchpad + +#### Fixed + +- Status screen is updated after nodes have been reset. +- Rewards Address is required before starting nodes. User input is required. +- Spinner does not stop spinning after two minutes when nodes are running. 
+ ## 2024-10-24 ### Network diff --git a/README.md b/README.md index 9fec7c0091..67ea01d426 100644 --- a/README.md +++ b/README.md @@ -32,10 +32,10 @@ You should build from the `stable` branch, as follows: ``` git checkout stable -export FOUNDATION_PK=88a82d718d16dccc839188eddc9a46cb216667c940cd46285199458c919a170a55490db09763ae216ed25e9db78c3576 -export GENESIS_PK=aa3526db2dbc43998e0b541b8455e2ce9dd4f1cad80090e671da16e3cd11cd5e3550f74c3cefd09ad253d93cacae2320 -export NETWORK_ROYALTIES_PK=8b5463a2c8142959a7b7cfd9295587812eb07ccbe13a85865503c8004eeeb6889ccace3588dcf9f7396784d9ee48f4d5 -export PAYMENT_FORWARD_PK=87d5b511a497183c945df63ab8790a4b94cfe452d00bfbdb39e41ee861384fe0de716a224da1c6fd11356de49877dfc2 +export FOUNDATION_PK=b20c916c7a28707018292f06dfdb66ab88ebcbad9c78d18135e843a91b1d66b875b24d2c27d8d1ad4637c2d5811896fe +export GENESIS_PK=93f7355906fa8c1a639bac80f4619dbb4cf5f71c47827d1ff2c30f0d133f6b841859662cbf7e0bbceca0eb0f521f6ebc +export NETWORK_ROYALTIES_PK=af451aa34a0d16c50eb217b91ab6b2ca75ef43b9c20449384ff1e90dbf8477351499cca985828e33b208805dadc80c63 +export PAYMENT_FORWARD_PK=adc6401588af49c60af6717a60546207abddb4e150014b4ab6c407ef6d7b3d3899b8892a91ab23042378b7b285e655fc cargo build --release --features=network-contacts --bin safenode ``` From 37ef3ab050320d355e63b31e366468ce529728b6 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 29 Oct 2024 15:45:48 +0000 Subject: [PATCH 128/128] chore(release): stable release 2024.10.4.2 ================== Crate Versions ================== autonomi: 0.2.2 autonomi-cli: 0.1.3 evmlib: 0.1.2 evm_testnet: 0.1.2 sn_build_info: 0.1.17 sn_evm: 0.1.2 sn_logging: 0.2.38 sn_metrics: 0.1.18 nat-detection: 0.2.9 sn_networking: 0.19.1 sn_node: 0.112.2 node-launchpad: 0.4.2 sn_node_manager: 0.11.1 sn_node_rpc_client: 0.6.33 sn_peers_acquisition: 0.5.5 sn_protocol: 0.17.13 sn_registers: 0.4.1 sn_service_management: 0.4.1 sn_transfers: 0.20.1 test_utils: 0.4.9 token_supplies: 0.1.56 =================== Binary Versions =================== nat-detection: 0.2.9 node-launchpad: 0.4.2 autonomi: 0.1.3 safenode: 0.112.2 safenode-manager: 0.11.1 safenode_rpc_client: 0.6.33 safenodemand: 0.11.1 --- Cargo.lock | 42 ++++++++++++++++---------------- autonomi-cli/Cargo.toml | 12 ++++----- autonomi/Cargo.toml | 18 +++++++------- evm_testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++--- node-launchpad/Cargo.toml | 14 +++++------ sn_build_info/Cargo.toml | 2 +- sn_evm/Cargo.toml | 4 +-- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 12 ++++----- sn_node/Cargo.toml | 28 ++++++++++----------- sn_node_manager/Cargo.toml | 16 ++++++------ sn_node_rpc_client/Cargo.toml | 16 ++++++------ sn_peers_acquisition/Cargo.toml | 4 +-- sn_protocol/Cargo.toml | 10 ++++---- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++--- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 6 ++--- token_supplies/Cargo.toml | 2 +- 22 files changed, 109 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95fe9c532a..dfcaa5e8c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1076,7 +1076,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.2.2-rc.2" +version = "0.2.2" dependencies = [ "alloy", "bip39", @@ -1123,7 +1123,7 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.3-rc.2" +version = "0.1.3" dependencies = [ "autonomi", "clap", @@ -2809,7 +2809,7 @@ dependencies = [ [[package]] name = 
"evm_testnet" -version = "0.1.2-rc.2" +version = "0.1.2" dependencies = [ "clap", "dirs-next", @@ -2820,7 +2820,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.2-rc.2" +version = "0.1.2" dependencies = [ "alloy", "dirs-next", @@ -5630,7 +5630,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.9-rc.2" +version = "0.2.9" dependencies = [ "clap", "clap-verbosity-flag", @@ -5747,7 +5747,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.4.2-rc.2" +version = "0.4.2" dependencies = [ "atty", "better-panic", @@ -8147,7 +8147,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.11.1-rc.2" +version = "0.11.1" dependencies = [ "assert_cmd", "assert_fs", @@ -8223,7 +8223,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.17-rc.2" +version = "0.1.17" dependencies = [ "chrono", "tracing", @@ -8265,7 +8265,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.2-rc.2" +version = "0.1.2" dependencies = [ "custom_debug", "evmlib", @@ -8288,7 +8288,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.38-rc.2" +version = "0.2.38" dependencies = [ "chrono", "color-eyre", @@ -8313,7 +8313,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.18-rc.2" +version = "0.1.18" dependencies = [ "clap", "color-eyre", @@ -8327,7 +8327,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.19.1-rc.2" +version = "0.19.1" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8372,7 +8372,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.112.2-rc.2" +version = "0.112.2" dependencies = [ "assert_fs", "async-trait", @@ -8429,7 +8429,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.33-rc.2" +version = "0.6.33" dependencies = [ "assert_fs", "async-trait", @@ -8456,7 +8456,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.5-rc.2" +version = "0.5.5" dependencies = [ "clap", "lazy_static", @@ -8472,7 +8472,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.13-rc.2" +version = "0.17.13" dependencies = [ "blsttc", "bytes", @@ -8502,7 +8502,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.1-rc.2" +version = "0.4.1" dependencies = [ "blsttc", "crdts", @@ -8519,7 +8519,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.1-rc.2" +version = "0.4.1" dependencies = [ "async-trait", "dirs-next", @@ -8545,7 +8545,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.1-rc.2" +version = "0.20.1" dependencies = [ "assert_fs", "blsttc", @@ -8889,7 +8889,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.9-rc.2" +version = "0.4.9" dependencies = [ "bytes", "color-eyre", @@ -9033,7 +9033,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.56-rc.2" +version = "0.1.56" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index aed30589b5..fb49e41f33 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] name = "autonomi-cli" description = "Autonomi CLI" license = "GPL-3.0" -version = "0.1.3-rc.2" +version = "0.1.3" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ 
-24,7 +24,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ +autonomi = { path = "../autonomi", version = "0.2.2", features = [ "data", "fs", "vault", @@ -50,9 +50,9 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } walkdir = "2.5.0" serde_json = "1.0.132" serde = "1.0.210" @@ -60,7 +60,7 @@ hex = "0.4.3" ring = "0.17.8" [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = [ +autonomi = { path = "../autonomi", version = "0.2.2", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 576b864ca3..6f5491a4f3 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.2.2-rc.2" +version = "0.2.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -38,11 +38,11 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } -sn_protocol = { version = "0.17.13-rc.2", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_protocol = { version = "0.17.13", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -60,8 +60,8 @@ blstrs = "0.7.1" alloy = { version = "0.5.3", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
test_utils = { path = "../test_utils" } @@ -71,7 +71,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.2-rc.2", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.2", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index e2c79cc40b..5182f2eca7 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.2" +version = "0.1.2" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index d58252a6d7..23c6a35e45 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.2" +version = "0.1.2" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 83a05f15a0..5da84e4066 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.9-rc.2" +version = "0.2.9" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_networking = { path = "../sn_networking", version = "0.19.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 680d3618d0..b9ee73af76 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.4.2-rc.2" +version = "0.4.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } -sn-node-manager = { version = "0.11.1-rc.2", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.5-rc.2", path = "../sn_peers_acquisition" } 
-sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } +sn-node-manager = { version = "0.11.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.5", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.1-rc.2", path = "../sn_service_management" } +sn_service_management = { version = "0.4.1", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index b91a71931a..d20a5f947b 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.17-rc.2" +version = "0.1.17" build = "build.rs" include = ["Cargo.toml", "src/**/*", "build.rs"] diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 27859ae7df..37c9d84cb8 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.2-rc.2" +version = "0.1.2" [features] test-utils = [] @@ -17,7 +17,7 @@ external-signer = ["evmlib/external-signer"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.2" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index abddb1cd42..8b6d7d8802 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.38-rc.2" +version = "0.2.38" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index e13285aed2..103d1d628e 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.18-rc.2" +version = "0.1.18" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index a12ee3f8f5..1a6bdc5b67 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.1-rc.2" +version = "0.19.1" [features] default = [] @@ -54,11 +54,11 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } +sn_registers = { path = 
"../sn_registers", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index cd248b6452..5903b68729 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.112.2-rc.2" +version = "0.112.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,15 +52,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } -sn_networking = { path = "../sn_networking", version = "0.19.1-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_networking = { path = "../sn_networking", version = "0.19.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } +sn_service_management = { path = "../sn_service_management", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } -autonomi = { path = "../autonomi", version = "0.2.2-rc.2", features = ["registers"] } +evmlib = { path = "../evmlib", version = "0.1.2" } +autonomi = { path = "../autonomi", version = "0.2.2", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.1", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 6b7879f22d..b3e651927e 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.11.1-rc.2" +version = "0.11.1" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = 
"../sn_build_info", version = "0.1.17-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13" } +sn_service_management = { path = "../sn_service_management", version = "0.4.1" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index c947bb632b..cdeb4a2dc1 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.33-rc.2" +version = "0.6.33" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } -sn_node = { path = "../sn_node", version = "0.112.2-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.4.1-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_node = { path = "../sn_node", version = "0.112.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 0349565a03..2d40d10161 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.5-rc.2" +version = "0.5.5" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.13", optional = 
true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 7666240409..832a832206 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.13-rc.2" +version = "0.17.13" [features] default = [] @@ -28,10 +28,10 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.17-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.1-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.1-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.17" } +sn_transfers = { path = "../sn_transfers", version = "0.20.1" } +sn_registers = { path = "../sn_registers", version = "0.4.1" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index 4b9dbf8930..596ce700ed 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.1-rc.2" +version = "0.4.1" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index e3f3886f0b..5cdfd7cd8f 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.1-rc.2" +version = "0.4.1" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.38-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.13-rc.2", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.38" } +sn_protocol = { path = "../sn_protocol", version = "0.17.13", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.2-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index f76853cbc8..f156f93de9 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.20.1-rc.2" +version = "0.20.1" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 5472b3033c..5acb11e414 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.9-rc.2" +version = "0.4.9" [features] local = ["sn_peers_acquisition/local"] @@ -16,9 +16,9 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = 
"~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.2-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.2" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.5" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index b076a9a97f..cf18a18ec8 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.56-rc.2" +version = "0.1.56" [dependencies]