From dd57a7e43c275fd13afe2d24d89756f6e5b2bbdc Mon Sep 17 00:00:00 2001 From: jairajdev Date: Thu, 11 Dec 2025 13:19:09 +0800 Subject: [PATCH 1/3] feat: add benchmark query script using autocannon for performance testing --- .gitignore | 3 + package-lock.json | 314 ++++++++++++++++++++++++++++++++++++- package.json | 2 + scripts/benchmark_query.ts | 59 +++++++ 4 files changed, 377 insertions(+), 1 deletion(-) create mode 100644 scripts/benchmark_query.ts diff --git a/.gitignore b/.gitignore index 7f4d4f3..4917f60 100644 --- a/.gitignore +++ b/.gitignore @@ -38,11 +38,14 @@ yarn-error.log* *.tsbuildinfo next-env.d.ts +benchmark-data + collector-db collector-stats-db collector.log server.log aggregator.log +benchmark.log # currently, in order to work well, tsconfig is being overrided when compiled # can be removed once we sort out currently tsconfig issues diff --git a/package-lock.json b/package-lock.json index 07d8fe3..d6cb03c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -57,6 +57,7 @@ }, "devDependencies": { "@types/amqplib": "0.10.5", + "@types/autocannon": "7.12.7", "@types/fastify-cors": "2.1.0", "@types/lodash": "4.14.191", "@types/node": "18.19.1", @@ -66,6 +67,7 @@ "@typescript-eslint/eslint-plugin": "5.60.1", "@typescript-eslint/parser": "5.60.1", "@typescript-eslint/typescript-estree": "5.61.0", + "autocannon": "8.0.0", "eslint": "8.44.0", "eslint-config-next": "13.4.8", "eslint-config-prettier": "8.8.0", @@ -123,6 +125,12 @@ "node": ">=6.0.0" } }, + "node_modules/@assemblyscript/loader": { + "version": "0.19.23", + "resolved": "https://registry.npmjs.org/@assemblyscript/loader/-/loader-0.19.23.tgz", + "integrity": "sha512-ulkCYfFbYj01ie1MDOyxv2F6SpRN1TOj7fQxbP07D6HmeR+gr2JLSmINKjga2emB+b1L2KGrFKBTc+e00p54nw==", + "dev": true + }, "node_modules/@babel/code-frame": { "version": "7.23.5", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", @@ -1835,6 +1843,16 @@ "node": ">=6.9.0" } }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", @@ -2140,6 +2158,15 @@ "node-pre-gyp": "bin/node-pre-gyp" } }, + "node_modules/@minimistjs/subarg": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@minimistjs/subarg/-/subarg-1.0.0.tgz", + "integrity": "sha512-Q/ONBiM2zNeYUy0mVSO44mWWKYM3UHuEK43PKIOzJCbvUnPoMH1K+gk3cf1kgnCVJFlWmddahQQCmrmBGlk9jQ==", + "dev": true, + "dependencies": { + "minimist": "^1.1.0" + } + }, "node_modules/@next/env": { "version": "13.3.4", "resolved": "https://registry.npmjs.org/@next/env/-/env-13.3.4.tgz", @@ -3061,6 +3088,15 @@ "@types/node": "*" } }, + "node_modules/@types/autocannon": { + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@types/autocannon/-/autocannon-7.12.7.tgz", + "integrity": "sha512-Pd4nPf7wRpacULa6D/EC9x3CwzFQXwA0z5WFuik/fvJjW44V3WzBTM3jtt8nSBoflUNgswPiMCtgrr1bwnAcMg==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/engine.io": { "version": "3.1.7", "resolved": "https://registry.npmjs.org/@types/engine.io/-/engine.io-3.1.7.tgz", @@ -4105,6 +4141,110 @@ "node": ">=8.0.0" } }, + "node_modules/autocannon": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/autocannon/-/autocannon-8.0.0.tgz", + "integrity": "sha512-fMMcWc2JPFcUaqHeR6+PbmEpTxCrPZyBUM95oG4w3ngJ8NfBNas/ZXA+pTHXLqJ0UlFVTcy05GC25WxKx/M20A==", + "dev": true, + "dependencies": { + "@minimistjs/subarg": "^1.0.0", + "chalk": "^4.1.0", + "char-spinner": "^1.0.1", + "cli-table3": "^0.6.0", + "color-support": "^1.1.1", + "cross-argv": "^2.0.0", + "form-data": "^4.0.0", + "has-async-hooks": "^1.0.0", + "hdr-histogram-js": "^3.0.0", + "hdr-histogram-percentiles-obj": "^3.0.0", + "http-parser-js": "^0.5.2", + "hyperid": "^3.0.0", + "lodash.chunk": "^4.2.0", + "lodash.clonedeep": "^4.5.0", + "lodash.flatten": "^4.4.0", + "manage-path": "^2.0.0", + "on-net-listen": "^1.1.1", + "pretty-bytes": "^5.4.1", + "progress": "^2.0.3", + "reinterval": "^1.1.0", + "retimer": "^3.0.0", + "semver": "^7.3.2", + "timestring": "^6.0.0" + }, + "bin": { + "autocannon": "autocannon.js" + } + }, + "node_modules/autocannon/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/autocannon/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/autocannon/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/autocannon/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/autocannon/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/autocannon/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/available-typed-arrays": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", @@ -4668,6 +4808,12 @@ "node": ">=4" } }, + "node_modules/char-spinner": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/char-spinner/-/char-spinner-1.0.1.tgz", + "integrity": "sha512-acv43vqJ0+N0rD+Uw3pDHSxP30FHrywu2NO6/wBaHChJIizpDeBUd6NjqhNhy9LGaEAhZAXn46QzmlAvIWd16g==", + "dev": true + }, "node_modules/chardet": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", @@ -4750,6 +4896,21 @@ "node": ">=8" } }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, "node_modules/cli-tableau": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/cli-tableau/-/cli-tableau-2.0.1.tgz", @@ -5063,6 +5224,12 @@ "integrity": "sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==", "dev": true }, + "node_modules/cross-argv": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/cross-argv/-/cross-argv-2.0.0.tgz", + "integrity": "sha512-YIaY9TR5Nxeb8SMdtrU8asWVM4jqJDNDYlKV21LxtYcfNJhp1kEsgSa6qXwXgzN0WQWGODps0+TlGp2xQSHwOg==", + "dev": true + }, "node_modules/cross-fetch": { "version": "3.1.8", "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", @@ -8165,6 +8332,12 @@ "node": ">= 0.4.0" } }, + "node_modules/has-async-hooks": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-async-hooks/-/has-async-hooks-1.0.0.tgz", + "integrity": "sha512-YF0VPGjkxr7AyyQQNykX8zK4PvtEDsUJAPqwu06UFz1lb6EvI53sPh5H1kWxg8NXI5LsfRCZ8uX9NkYDZBb/mw==", + "dev": true + }, "node_modules/has-bigints": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", @@ -8246,6 +8419,32 @@ "node": ">= 0.4" } }, + "node_modules/hdr-histogram-js": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hdr-histogram-js/-/hdr-histogram-js-3.0.1.tgz", + "integrity": "sha512-l3GSdZL1Jr1C0kyb461tUjEdrRPZr8Qry7jByltf5JGrA0xvqOSrxRBfcrJqqV/AMEtqqhHhC6w8HW0gn76tRQ==", + "dev": true, + "dependencies": { + "@assemblyscript/loader": "^0.19.21", + "base64-js": "^1.2.0", + "pako": "^1.0.3" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/hdr-histogram-js/node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true + }, + "node_modules/hdr-histogram-percentiles-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hdr-histogram-percentiles-obj/-/hdr-histogram-percentiles-obj-3.0.0.tgz", + "integrity": "sha512-7kIufnBqdsBGcSZLPJwqHT3yhk1QTsSlFsVD3kx5ixH/AlgBs9yM1q6DPhXZ8f8gtdqgh7N7/5btRLpQsS2gHw==", + "dev": true + }, "node_modules/highcharts": { "version": "10.3.2", "resolved": "https://registry.npmjs.org/highcharts/-/highcharts-10.3.2.tgz", @@ -8321,6 +8520,12 @@ "node": ">= 0.6" } }, + "node_modules/http-parser-js": { + "version": "0.5.10", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.10.tgz", + "integrity": "sha512-Pysuw9XpUq5dVc/2SMHpuTY01RFl8fttgcyunjL7eEMhGM3cI4eOmiCycJDVCo/7O7ClfQD3SaI6ftDzqOXYMA==", + "dev": true + }, "node_modules/http-proxy-agent": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", @@ -8382,6 +8587,41 @@ "ms": "^2.0.0" } }, + "node_modules/hyperid": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/hyperid/-/hyperid-3.3.0.tgz", + "integrity": "sha512-7qhCVT4MJIoEsNcbhglhdmBKb09QtcmJNiIQGq7js/Khf5FtQQ9bzcAuloeqBeee7XD7JqDeve9KNlQya5tSGQ==", + "dev": true, + "dependencies": { + "buffer": "^5.2.1", + "uuid": "^8.3.2", + "uuid-parse": "^1.1.0" + } + }, + "node_modules/hyperid/node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -9359,11 +9599,29 @@ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, + "node_modules/lodash.chunk": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", + "integrity": "sha512-ZzydJKfUHJwHa+hF5X66zLFCBrWn5GeF28OHEr4WVWtNDXlQ/IjWKPBiikqKo2ne0+v6JgCgJ0GzJp8k8bHC7w==", + "dev": true + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==", + "dev": true + }, "node_modules/lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" }, + "node_modules/lodash.flatten": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", + "integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==", + "dev": true + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -9473,6 +9731,12 @@ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, + "node_modules/manage-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/manage-path/-/manage-path-2.0.0.tgz", + "integrity": "sha512-NJhyB+PJYTpxhxZJ3lecIGgh4kwIY2RAh44XvAz9UlqthlQwtPBf62uBVR8XaD8CRuSjQ6TnZH2lNJkbLPZM2A==", + "dev": true + }, "node_modules/map-obj": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", @@ -10398,6 +10662,15 @@ "node": ">= 0.8" } }, + "node_modules/on-net-listen": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/on-net-listen/-/on-net-listen-1.1.2.tgz", + "integrity": "sha512-y1HRYy8s/RlcBvDUwKXSmkODMdx4KSuIvloCnQYJ2LdBBC1asY4HtfhXwe3UWknLakATZDnbzht2Ijw3M1EqFg==", + "dev": true, + "engines": { + "node": ">=9.4.0 || ^8.9.4" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -11098,6 +11371,18 @@ "node": ">=6.0.0" } }, + "node_modules/pretty-bytes": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", + "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -11671,6 +11956,12 @@ "jsesc": "bin/jsesc" } }, + "node_modules/reinterval": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reinterval/-/reinterval-1.1.0.tgz", + "integrity": "sha512-QIRet3SYrGp0HUHO88jVskiG6seqUGC5iAG7AwI/BV4ypGcuqk9Du6YQBUOUqm9c8pw1eyLoIaONifRua1lsEQ==", + "dev": true + }, "node_modules/request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -11822,6 +12113,12 @@ "node": ">=4" } }, + "node_modules/retimer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/retimer/-/retimer-3.0.0.tgz", + "integrity": "sha512-WKE0j11Pa0ZJI5YIk0nflGI7SQsfl2ljihVy7ogh7DeQSeYAUi0ubZ/yEueGtDfUPk6GH5LRw1hBdLq4IwUBWA==", + "dev": true + }, "node_modules/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", @@ -12851,6 +13148,15 @@ "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", "dev": true }, + "node_modules/timestring": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/timestring/-/timestring-6.0.0.tgz", + "integrity": "sha512-wMctrWD2HZZLuIlchlkE2dfXJh7J2KDI9Dwl+2abPYg0mswQHfOAyQW3jJg1pY5VfttSINZuKcXoB3FGypVklA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/tiny-lru": { "version": "11.0.1", "resolved": "https://registry.npmjs.org/tiny-lru/-/tiny-lru-11.0.1.tgz", @@ -13433,6 +13739,12 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/uuid-parse": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/uuid-parse/-/uuid-parse-1.1.0.tgz", + "integrity": "sha512-OdmXxA8rDsQ7YpNVbKSJkNzTw2I+S5WsbMDnCtIWSQaosNAcWtFuI/YK1TjzUI6nbkgiqEyh8gWngfcv8Asd9A==", + "dev": true + }, "node_modules/v8-compile-cache": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", @@ -14104,4 +14416,4 @@ } } } -} \ No newline at end of file +} diff --git a/package.json b/package.json index c6b62b6..95772f7 100644 --- a/package.json +++ b/package.json @@ -76,6 +76,7 @@ }, "devDependencies": { "@types/amqplib": "0.10.5", + "@types/autocannon": "7.12.7", "@types/fastify-cors": "2.1.0", "@types/lodash": "4.14.191", "@types/node": "18.19.1", @@ -85,6 +86,7 @@ "@typescript-eslint/eslint-plugin": "5.60.1", "@typescript-eslint/parser": "5.60.1", "@typescript-eslint/typescript-estree": "5.61.0", + "autocannon": "8.0.0", "eslint": "8.44.0", "eslint-config-next": "13.4.8", "eslint-config-prettier": "8.8.0", diff --git a/scripts/benchmark_query.ts b/scripts/benchmark_query.ts new file mode 100644 index 0000000..9b9bd6f --- /dev/null +++ b/scripts/benchmark_query.ts @@ -0,0 +1,59 @@ +import fs from 'fs' +import autocannon, { Instance } from 'autocannon' + +/** + * Prerequisites: + * - Export the accountIds and txIds from the SQLite databases using the following commands + */ +// sqlite3 -noheader -separator $'\n' collector-db/accounts.sqlite3 \ +// "SELECT accountId FROM accounts;" \ +// | jq -R -s 'split("\n")[:-1]' > benchmark-data/accountIds.json + +// sqlite3 -noheader -separator $'\n' collector-db/transactions.sqlite3 \ +// "SELECT txId FROM transactions;" \ +// | jq -R -s 'split("\n")[:-1]' > benchmark-data/txIds.json + +const baseUrl = 'http://127.0.0.1:6001' +// Load accountIds/txIds as a simple array + +const ids: string[] = JSON.parse(fs.readFileSync('./benchmark-data/accountIds.json', 'utf8')) +const endpoint = `/api/account` +const path = `?accountId=` + +// const ids: string[] = JSON.parse(fs.readFileSync('./benchmark-data/txIds.json', 'utf8')) +// const endpoint = `/api/transaction` +// const path = `?txId=` + +console.log('Loaded ids', ids.length) + +function getRandom(arr: T[]): T { + return arr[Math.floor(Math.random() * arr.length)] +} + +// Create autocannon instance and store it +const instance: Instance = autocannon( + { + url: baseUrl, + connections: 100, + duration: 30, + requests: [ + { + method: 'GET', + path: endpoint, + setupRequest: (req) => { + const id = getRandom(ids) + req.path = `${endpoint}${path}${id}` + return req + }, + }, + ], + }, + (err, result) => { + if (err) console.error(err) + else console.log('Done!') + // console.log(autocannon.printResult(result)) + } +) + +// โœ… Optional: show live stats in the terminal +autocannon.track(instance) From 5f93147b64f84697d68a5dce504560356b6e4455 Mon Sep 17 00:00:00 2001 From: jairajdev Date: Fri, 12 Dec 2025 15:14:07 +0800 Subject: [PATCH 2/3] Add comprehensive API benchmark suite with real database data - Implement 25+ benchmark tests covering all major endpoints (accounts, transactions, cycles, receipts, stats) - Add data collector to fetch real IDs from database for realistic load testing - Include both programmatic (autocannon) and CLI-based (Artillery) testing options - Export test data to CSV/JSON for external load testing tools - Add comprehensive documentation with quick start guides and troubleshooting - Configure TypeScript compilation for benchmark suite with separate tsconfig - Add npm scripts for various benchmark scenarios (quick, full, filtered by category) --- benchmark/CLI-QUICKSTART.md | 163 +++++++++ benchmark/CLI-TESTING.md | 483 +++++++++++++++++++++++++++ benchmark/MANUAL-DATA-EXPORT.md | 479 ++++++++++++++++++++++++++ benchmark/QUICKSTART.md | 261 +++++++++++++++ benchmark/README.md | 383 +++++++++++++++++++++ benchmark/SUMMARY.md | 443 ++++++++++++++++++++++++ benchmark/artillery-accounts.yml | 40 +++ benchmark/artillery-combined.yml | 82 +++++ benchmark/artillery-transactions.yml | 48 +++ benchmark/autocannon-advanced.ts | 228 +++++++++++++ benchmark/autocannon-cli.sh | 75 +++++ benchmark/data-collector.ts | 139 ++++++++ benchmark/example.ts | 124 +++++++ benchmark/export-data.sh | 103 ++++++ benchmark/export-test-data.ts | 107 ++++++ benchmark/test-suite.ts | 452 +++++++++++++++++++++++++ package.json | 14 +- tsconfig.benchmark.json | 37 ++ 18 files changed, 3660 insertions(+), 1 deletion(-) create mode 100644 benchmark/CLI-QUICKSTART.md create mode 100644 benchmark/CLI-TESTING.md create mode 100644 benchmark/MANUAL-DATA-EXPORT.md create mode 100644 benchmark/QUICKSTART.md create mode 100644 benchmark/README.md create mode 100644 benchmark/SUMMARY.md create mode 100644 benchmark/artillery-accounts.yml create mode 100644 benchmark/artillery-combined.yml create mode 100644 benchmark/artillery-transactions.yml create mode 100644 benchmark/autocannon-advanced.ts create mode 100755 benchmark/autocannon-cli.sh create mode 100644 benchmark/data-collector.ts create mode 100644 benchmark/example.ts create mode 100755 benchmark/export-data.sh create mode 100644 benchmark/export-test-data.ts create mode 100644 benchmark/test-suite.ts create mode 100644 tsconfig.benchmark.json diff --git a/benchmark/CLI-QUICKSTART.md b/benchmark/CLI-QUICKSTART.md new file mode 100644 index 0000000..221c621 --- /dev/null +++ b/benchmark/CLI-QUICKSTART.md @@ -0,0 +1,163 @@ +# CLI Load Testing - Quick Start + +Load test your API from the command line using exported test data! + +## ๐Ÿš€ Quick Start (3 Steps) + +### 1. Export Test Data + +```bash +npm run benchmark:export-data +``` + +This creates `benchmark-data/` with CSV/JSON files containing 100000+ real IDs. + +### 2. Choose Your Tool + +#### **Option A: Artillery (Easiest for CLI)** + +```bash +# Install globally (one time) +npm install -g artillery + +# Test accounts with real IDs +npm run benchmark:artillery-accounts + +# Test transactions with real IDs +npm run benchmark:artillery-transactions + +# Test combined workload (50% tx, 30% accounts, etc.) +npm run benchmark:artillery-combined +``` + +#### **Option B: autocannon (Advanced, Random IDs)** + +```bash +# Rotates through different IDs automatically +npm run benchmark:autocannon-advanced + +# Or specific test: +npm run benchmark:autocannon-advanced accounts # Accounts only +npm run benchmark:autocannon-advanced transactions # Transactions only +npm run benchmark:autocannon-advanced mixed # 50/50 mix +``` + +#### **Option C: autocannon CLI (Simple, Single ID)** + +```bash +# Pick random ID and test +TX_ID=$(tail -n +2 benchmark-data/transactions.csv | shuf -n 1) +autocannon -c 100 -d 30 "http://127.0.0.1:6001/api/transaction?txId=$TX_ID" +``` + +## ๐Ÿ“Š What You Get + +### Artillery Output + +``` +Summary report @ 16:30:15 +Scenarios launched: 12000 +Requests completed: 12000 +Mean response time: 65 ms +p95: 142 ms +p99: 198 ms +Errors: 0 +``` + +### autocannon-advanced Output + +``` +๐Ÿ”ฅ Test 1: Account Queries (Random IDs) +[Progress bar] +โœ“ Account Test Results: + Requests/sec: 12472.80 + Latency (avg): 8.25ms + Latency (p95): 13.45ms + Latency (p99): 18.23ms + Total requests: 374184 + Errors: 0 +``` + +## ๐Ÿ“ Files Created + +``` +benchmark/ +โ”œโ”€โ”€ artillery-accounts.yml # Artillery config for accounts +โ”œโ”€โ”€ artillery-transactions.yml # Artillery config for transactions +โ”œโ”€โ”€ artillery-combined.yml # Combined workload +โ”œโ”€โ”€ autocannon-advanced.ts # Advanced autocannon (rotates IDs) +โ”œโ”€โ”€ autocannon-cli.sh # Shell script examples +โ”œโ”€โ”€ export-test-data.ts # Data export script +โ””โ”€โ”€ CLI-TESTING.md # Full documentation + +benchmark-data/ # Created after export +โ”œโ”€โ”€ test-data.json # All data in JSON +โ”œโ”€โ”€ accounts.csv # 100000 account IDs +โ”œโ”€โ”€ transactions.csv # 100000 transaction IDs +โ”œโ”€โ”€ receipts.csv # 100000 receipt IDs +โ”œโ”€โ”€ cycles.csv # 100000 cycle numbers +โ””โ”€โ”€ combined.csv # All in one CSV +``` + +## ๐ŸŽฏ Use Cases + +**Quick sanity check:** + +```bash +npm run benchmark:export-data +npm run benchmark:artillery-accounts +``` + +**Production readiness:** + +```bash +artillery run --target https://prod.example.com benchmark/artillery-combined.yml +``` + +**Stress test with rotating IDs:** + +```bash +npm run benchmark:autocannon-advanced mixed +``` + +## ๐Ÿ“– Full Documentation + +- **[CLI-TESTING.md](./CLI-TESTING.md)** - Complete guide with all options +- **[QUICKSTART.md](./QUICKSTART.md)** - Original benchmark suite guide +- **[README.md](./README.md)** - Full API benchmark documentation + +## ๐Ÿ’ก Pro Tips + +1. **Re-export data regularly:** + + ```bash + npm run benchmark:export-data # Gets latest IDs from DB + ``` + +2. **Save results:** + + ```bash + artillery run -o results.json benchmark/artillery-combined.yml + artillery report results.json # HTML report + ``` + +3. **Custom target:** + + ```bash + artillery run --target http://staging.example.com benchmark/artillery-accounts.yml + ``` + +4. **More load:** + ```bash + artillery run --duration 300 --arrival-rate 100000 benchmark/artillery-combined.yml + ``` + +--- + +**Your results were amazing! ๐ŸŽ‰** + +- Transaction queries: **12,473 req/s** +- Account queries: **11,977 req/s** +- Total data: **8,311 req/s** + +All with p99 latency under 20ms! ๐Ÿ”ฅ diff --git a/benchmark/CLI-TESTING.md b/benchmark/CLI-TESTING.md new file mode 100644 index 0000000..af3ec61 --- /dev/null +++ b/benchmark/CLI-TESTING.md @@ -0,0 +1,483 @@ +# CLI-Based Load Testing Guide + +This guide shows how to run load tests directly from the command line using exported test data files, without needing to write custom scripts. + +## Quick Start + +### 1. Export Test Data + +First, export your database IDs to files: + +```bash +npm run benchmark:export-data +``` + +This creates: + +``` +benchmark-data/ +โ”œโ”€โ”€ test-data.json # All data in JSON format +โ”œโ”€โ”€ accounts.csv # Account IDs (one per line) +โ”œโ”€โ”€ transactions.csv # Transaction IDs (one per line) +โ”œโ”€โ”€ receipts.csv # Receipt IDs (one per line) +โ”œโ”€โ”€ cycles.csv # Cycle numbers (one per line) +โ””โ”€โ”€ combined.csv # All data in one CSV +``` + +### 2. Choose Your Tool + +#### **Option A: Artillery (Recommended for CLI)** + +Best for: Data-driven testing, realistic scenarios, HTML reports + +#### **Option B: autocannon** + +Best for: Raw performance, Node.js apps, quick tests + +--- + +## Artillery CLI Testing + +### Installation + +```bash +npm install -g artillery +``` + +### Pre-configured Tests + +#### Test Accounts Only + +```bash +npm run benchmark:artillery-accounts +# or +artillery run benchmark/artillery-accounts.yml +``` + +**What it does:** + +- Loads 100000 account IDs from CSV +- Tests: `GET /api/account?accountId={randomId}` +- Phases: 60s warmup โ†’ 120s sustained load โ†’ 60s spike +- Connections: 50 โ†’ 100 โ†’ 200 per second + +#### Test Transactions Only + +```bash +npm run benchmark:artillery-transactions +# or +artillery run benchmark/artillery-transactions.yml +``` + +**What it does:** + +- Loads 100000 transaction IDs from CSV +- 70% regular queries, 30% with balance changes +- Same load phases as accounts + +#### Test Combined Workload + +```bash +npm run benchmark:artillery-combined +# or +artillery run benchmark/artillery-combined.yml +``` + +**What it does:** + +- Uses combined CSV with all data types +- 50% transactions, 30% accounts, 10% receipts, 10% cycles +- Simulates realistic mixed traffic + +### Custom Artillery Options + +#### Change Target URL + +```bash +artillery run --target http://production-server.com benchmark/artillery-accounts.yml +``` + +#### Adjust Load Parameters + +```bash +# Run for 5 minutes with 200 req/s +artillery run --duration 300 --arrival-rate 200 benchmark/artillery-accounts.yml +``` + +#### Override Environment Variables + +```bash +API_URL=http://127.0.0.1:3000 artillery run benchmark/artillery-accounts.yml +``` + +#### Generate HTML Report + +```bash +artillery run --output results.json benchmark/artillery-combined.yml +artillery report results.json --output report.html +open report.html # Beautiful charts and graphs! +``` + +### Artillery Configuration + +Edit the YAML files to customize: + +```yaml +config: + target: 'http://127.0.0.1:6001' + phases: + - duration: 60 # Test for 60 seconds + arrivalRate: 100 # 100 users per second + name: 'Load test' + + payload: + path: '../benchmark-data/accounts.csv' + order: 'random' # or "sequence" + skipHeader: true + + ensure: # Performance thresholds (test fails if exceeded) + maxErrorRate: 1 # Max 1% error rate + p95: 200 # 95th percentile < 200ms + p99: 500 # 99th percentile < 500ms +``` + +--- + +## autocannon CLI Testing + +### Simple CLI Usage + +#### Test Single Account ID + +```bash +# Pick a random account from the CSV +ACCOUNT_ID=$(tail -n +2 benchmark-data/accounts.csv | shuf -n 1) + +autocannon \ + -c 100 \ # 100 concurrent connections + -d 30 \ # Duration: 30 seconds + -m GET \ # HTTP method + "http://127.0.0.1:6001/api/account?accountId=$ACCOUNT_ID" +``` + +#### Test Single Transaction ID + +```bash +TX_ID=$(tail -n +2 benchmark-data/transactions.csv | shuf -n 1) + +autocannon \ + -c 100 \ + -d 30 \ + "http://127.0.0.1:6001/api/transaction?txId=$TX_ID" +``` + +#### Save Results to JSON + +```bash +TX_ID=$(tail -n +2 benchmark-data/transactions.csv | shuf -n 1) + +autocannon \ + -c 100 \ + -d 30 \ + -j \ # JSON output + "http://127.0.0.1:6001/api/transaction?txId=$TX_ID" \ + > results.json +``` + +### Shell Script (Multiple IDs) + +Run the provided shell script: + +```bash +./benchmark/autocannon-cli.sh +``` + +This tests: + +1. Random account ID query +2. Random transaction ID query +3. Shows examples of multi-URL testing + +### Advanced autocannon (Programmatic) + +For testing with **rotating/random IDs from the CSV**, use the advanced script: + +```bash +# Test all endpoints +npm run benchmark:autocannon-advanced + +# Test only accounts (rotates through all 100000 account IDs) +npm run benchmark:autocannon-advanced accounts + +# Test only transactions (rotates through all 100000 tx IDs) +npm run benchmark:autocannon-advanced transactions + +# Test mixed load (50% accounts, 50% transactions, random IDs) +npm run benchmark:autocannon-advanced mixed +``` + +**What makes this "advanced":** + +- Each request uses a **different random ID** from your data +- Simulates real user traffic patterns +- Real-time progress counter +- Detailed breakdown by query type + +### Direct autocannon CLI with Options + +```bash +autocannon \ + -c 200 \ # 200 concurrent connections + -d 60 \ # 60 seconds + -p 10 \ # Pipelining: 10 requests per connection + -m GET \ # HTTP method + -H "Accept: application/json" \ # Custom headers + --on-port 6001 \ # Wait for port to be ready + http://127.0.0.1:6001/api/account?accountId=abc123 +``` + +--- + +## Comparison: Artillery vs autocannon CLI + +| Feature | Artillery | autocannon CLI | +| ---------------------- | ----------------- | ------------------- | +| **CSV/JSON data** | โœ… Native support | โš ๏ธ Manual scripting | +| **Random IDs** | โœ… Built-in | โš ๏ธ Needs script | +| **Multiple scenarios** | โœ… Easy | โŒ Complex | +| **HTML reports** | โœ… Beautiful | โŒ JSON only | +| **Performance** | Good | โšก Excellent | +| **Ease of use** | โญโญโญโญโญ | โญโญโญ | +| **Best for** | Realistic testing | Raw benchmarks | + +**Recommendation:** + +- Use **Artillery** for data-driven testing with your exported CSVs +- Use **autocannon-advanced** (Node script) for rotating through many IDs +- Use **autocannon CLI** for quick one-off tests + +--- + +## Common Workflows + +### Workflow 1: Daily Performance Check + +```bash +# Export latest data +npm run benchmark:export-data + +# Run combined test +artillery run --output daily-$(date +%Y%m%d).json benchmark/artillery-combined.yml + +# Generate report +artillery report daily-$(date +%Y%m%d).json +``` + +### Workflow 2: Load Test Specific Endpoint + +```bash +# Export data +npm run benchmark:export-data + +# Edit artillery-accounts.yml to adjust load +# Then run: +artillery run benchmark/artillery-accounts.yml +``` + +### Workflow 3: Stress Test with Growing Load + +```bash +# Edit artillery config with ramping phases: +# phases: +# - duration: 60, arrivalRate: 50 +# - duration: 60, arrivalRate: 100 +# - duration: 60, arrivalRate: 200 +# - duration: 60, arrivalRate: 500 + +artillery run benchmark/artillery-combined.yml +``` + +### Workflow 4: Quick autocannon Test + +```bash +# Export data (if not already done) +npm run benchmark:export-data + +# Run advanced script with rotating IDs +npm run benchmark:autocannon-advanced transactions +``` + +--- + +## Understanding Results + +### Artillery Output + +``` +Summary report @ 16:30:15 +Scenarios launched: 12000 +Scenarios completed: 12000 +Requests completed: 12000 +Mean response time: 65.3 ms +p50: 58 ms +p95: 142 ms +p99: 198 ms +Errors: 0 +``` + +**Good signs:** + +- โœ… p95 < 200ms +- โœ… p99 < 500ms +- โœ… Errors: 0 +- โœ… All scenarios completed + +### autocannon Output + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Stat โ”‚ 2.5% โ”‚ 50% โ”‚ 97.5% โ”‚ 99% โ”‚ Avg โ”‚ Stdev โ”‚ Max โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Latency โ”‚ 5 ms โ”‚ 8 ms โ”‚ 18 ms โ”‚ 23ms โ”‚ 9.25 ms โ”‚ 4.12 ms โ”‚ 156 ms โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Stat โ”‚ 1% โ”‚ 2.5% โ”‚ 50% โ”‚ 97.5% โ”‚ Avg โ”‚ Stdev โ”‚ Min โ”‚ +โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค +โ”‚ Req/Sec โ”‚ 11,455 โ”‚ 11,455 โ”‚ 12,543 โ”‚ 12,799 โ”‚ 12,473 โ”‚ 389.23 โ”‚ 11,448 โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + +12k requests in 30.03s, 15.2 MB read +``` + +**What to look for:** + +- **Req/Sec**: Higher is better (your 12k is excellent!) +- **Latency p99**: Should be < 500ms +- **Max latency**: Check for outliers +- **Errors**: Should be 0 + +--- + +## Troubleshooting + +### "No such file: benchmark-data/accounts.csv" + +```bash +npm run benchmark:export-data +``` + +### "Connection refused" + +Make sure server is running: + +```bash +npm run server +``` + +### "Too many open files" (macOS) + +```bash +ulimit -n 10000 +``` + +### Artillery not found + +```bash +npm install -g artillery +``` + +### Test data is stale + +Re-export with fresh data: + +```bash +npm run benchmark:export-data +``` + +--- + +## Advanced Tips + +### 1. CI/CD Integration + +**GitHub Actions:** + +```yaml +- name: Export test data + run: npm run benchmark:export-data + +- name: Run load test + run: artillery run --output results.json benchmark/artillery-combined.yml + +- name: Check thresholds + run: | + if grep -q '"errors": [^0]' results.json; then + echo "Load test failed!" + exit 1 + fi +``` + +### 2. Compare Before/After + +```bash +# Before optimization +npm run benchmark:export-data +artillery run -o before.json benchmark/artillery-accounts.yml + +# After optimization +artillery run -o after.json benchmark/artillery-accounts.yml + +# Compare +artillery report before.json --output before.html +artillery report after.json --output after.html +``` + +### 3. Custom Artillery Processors + +Create `benchmark/processors.js`: + +```javascript +module.exports = { + logRequest: function (requestParams, context, ee, next) { + console.log(`Testing with ID: ${context.vars.accountId}`) + return next() + }, +} +``` + +Then in YAML: + +```yaml +config: + processor: './benchmark/processors.js' + +scenarios: + - flow: + - function: 'logRequest' + - get: + url: '/api/account?accountId={{ accountId }}' +``` + +--- + +## Summary + +โœ… **For CLI testing with your data:** + +1. `npm run benchmark:export-data` (once) +2. `npm run benchmark:artillery-combined` (Artillery - easiest) +3. `npm run benchmark:autocannon-advanced` (autocannon - most flexible) + +โœ… **For one-off tests:** + +```bash +TX_ID=$(tail -n +2 benchmark-data/transactions.csv | shuf -n 1) +autocannon -c 100 -d 30 "http://127.0.0.1:6001/api/transaction?txId=$TX_ID" +``` + +โœ… **For production monitoring:** + +```bash +artillery run --environment production benchmark/artillery-combined.yml +``` diff --git a/benchmark/MANUAL-DATA-EXPORT.md b/benchmark/MANUAL-DATA-EXPORT.md new file mode 100644 index 0000000..5f92deb --- /dev/null +++ b/benchmark/MANUAL-DATA-EXPORT.md @@ -0,0 +1,479 @@ +# Manual Data Export with SQLite3 + +Export test data directly from SQLite databases using SQL commands - no Node.js required! + +## Quick Start + +```bash +# Create output directory +mkdir -p benchmark-data + +# Export accounts +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/accounts.csv + +# Export transactions +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/transactions.csv + +# Export receipts +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/receipts.csv + +# Export cycles +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles ORDER BY counter DESC LIMIT 100000;" \ + > benchmark-data/cycles.csv +``` + +Done! Now use with Artillery or autocannon. + +--- + +## Detailed Guide + +### 1. Export Accounts + +```bash +# Basic export (100000 random accounts) +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/accounts.csv + +# Latest 100000 accounts +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY timestamp DESC LIMIT 100000;" \ + > benchmark-data/accounts.csv + +# Filter by account type +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts WHERE accountType = 9 ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/user-accounts.csv +``` + +### 2. Export Transactions + +```bash +# Basic export (100000 random transactions) +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/transactions.csv + +# Latest transactions +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY timestamp DESC LIMIT 100000;" \ + > benchmark-data/transactions.csv + +# Specific transaction type (e.g., transfers) +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions WHERE txType = 'transfer' ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/transfer-txs.csv + +# High-value transactions +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions WHERE CAST(txFee AS INTEGER) > 0 ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/fee-txs.csv +``` + +### 3. Export Receipts + +```bash +# Basic export +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/receipts.csv + +# Latest receipts +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY cycle DESC, timestamp DESC LIMIT 100000;" \ + > benchmark-data/receipts.csv + +# Receipts from specific cycle range +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts WHERE cycle BETWEEN 80000 AND 81000 ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/receipts-recent.csv +``` + +### 4. Export Cycles + +```bash +# Latest 100000 cycles +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles ORDER BY counter DESC LIMIT 100000;" \ + > benchmark-data/cycles.csv + +# Cycles with markers +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter, marker FROM cycles ORDER BY counter DESC LIMIT 100000;" \ + > benchmark-data/cycles-with-markers.csv + +# Specific cycle range +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles WHERE counter BETWEEN 80000 AND 81000;" \ + > benchmark-data/cycles-range.csv +``` + +### 5. Combined Export + +Create a combined CSV with all data types: + +```bash +# Create headers +echo "accountId,txId,receiptId,cycleNumber" > benchmark-data/combined.csv + +# Use a SQL join or paste command +paste -d',' \ + <(sqlite3 collector-db/accounts.db "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;") \ + <(sqlite3 collector-db/transactions.db "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT 100000;") \ + <(sqlite3 collector-db/receipts.db "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT 100000;") \ + <(sqlite3 collector-db/cycles.db "SELECT counter FROM cycles ORDER BY counter DESC LIMIT 100000;") \ + >> benchmark-data/combined.csv +``` + +--- + +## SQLite3 CLI Options Explained + +```bash +sqlite3 [OPTIONS] database.db "SQL QUERY" +``` + +**Key Options:** + +- `-header` - Include column names as first row +- `-csv` - Output in CSV format (default is pipe-separated) +- `-column` - Column-aligned output (for viewing) +- `-json` - Output as JSON array +- `-line` - One value per line + +**Examples:** + +```bash +# CSV with header (recommended for Artillery) +sqlite3 -header -csv collector-db/accounts.db "SELECT accountId FROM accounts LIMIT 10;" + +# JSON output +sqlite3 -json collector-db/accounts.db "SELECT accountId FROM accounts LIMIT 10;" + +# Pretty table format (for viewing, not exporting) +sqlite3 -column -header collector-db/accounts.db "SELECT accountId FROM accounts LIMIT 10;" + +# Without header (if Artillery/autocannon expects no header) +sqlite3 -csv collector-db/accounts.db "SELECT accountId FROM accounts LIMIT 10;" +``` + +--- + +## Advanced Queries + +### Export Accounts with Metadata + +```bash +sqlite3 -header -csv collector-db/accounts.db " + SELECT + accountId, + accountType, + timestamp, + cycleNumber + FROM accounts + ORDER BY RANDOM() + LIMIT 100000; +" > benchmark-data/accounts-detailed.csv +``` + +### Export Transactions by Time Range + +```bash +sqlite3 -header -csv collector-db/transactions.db " + SELECT txId + FROM transactions + WHERE timestamp > strftime('%s', 'now', '-7 days') * 1000 + ORDER BY RANDOM() + LIMIT 100000; +" > benchmark-data/transactions-last-7days.csv +``` + +### Export Active User Accounts Only + +```bash +sqlite3 -header -csv collector-db/accounts.db " + SELECT DISTINCT accountId + FROM accounts + WHERE accountType = 9 + AND data LIKE '%balance%' + ORDER BY RANDOM() + LIMIT 100000; +" > benchmark-data/active-user-accounts.csv +``` + +### Export Failed Transactions + +```bash +sqlite3 -header -csv collector-db/receipts.db " + SELECT receiptId + FROM receipts + WHERE result LIKE '%fail%' + ORDER BY RANDOM() + LIMIT 100000; +" > benchmark-data/failed-txs.csv +``` + +--- + +## One-Liner Shell Script + +Create a script to export all data at once: + +```bash +#!/bin/bash +# export-data.sh + +DEST="benchmark-data" +mkdir -p "$DEST" + +echo "Exporting test data..." + +# Accounts +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > "$DEST/accounts.csv" +echo "โœ“ Accounts: $(wc -l < "$DEST/accounts.csv") rows" + +# Transactions +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT 100000;" \ + > "$DEST/transactions.csv" +echo "โœ“ Transactions: $(wc -l < "$DEST/transactions.csv") rows" + +# Receipts +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT 100000;" \ + > "$DEST/receipts.csv" +echo "โœ“ Receipts: $(wc -l < "$DEST/receipts.csv") rows" + +# Cycles +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles ORDER BY counter DESC LIMIT 100000;" \ + > "$DEST/cycles.csv" +echo "โœ“ Cycles: $(wc -l < "$DEST/cycles.csv") rows" + +echo "Done! Files in $DEST/" +``` + +Make executable and run: + +```bash +chmod +x export-data.sh +./export-data.sh +``` + +--- + +## Verify Exported Data + +Check what you exported: + +```bash +# Count rows (subtract 1 for header) +wc -l benchmark-data/*.csv + +# View first 5 rows +head -5 benchmark-data/accounts.csv + +# View random sample +tail -n +2 benchmark-data/transactions.csv | shuf -n 5 + +# Check for duplicates +tail -n +2 benchmark-data/accounts.csv | sort | uniq -d +``` + +--- + +## Use with Load Testing Tools + +### With Artillery + +```yaml +# artillery-test.yml +config: + target: 'http://127.0.0.1:6001' + payload: + path: 'benchmark-data/accounts.csv' + fields: ['accountId'] + order: 'random' + skipHeader: true + +scenarios: + - flow: + - get: + url: '/api/account?accountId={{ accountId }}' +``` + +Run: + +```bash +artillery run artillery-test.yml +``` + +### With autocannon CLI + +```bash +# Pick random ID from exported CSV +ACCOUNT_ID=$(tail -n +2 benchmark-data/accounts.csv | shuf -n 1) + +autocannon -c 100 -d 30 \ + "http://127.0.0.1:6001/api/account?accountId=$ACCOUNT_ID" +``` + +--- + +## Troubleshooting + +### "database is locked" + +The collector is running and has the DB locked. Either: + +- Stop the collector temporarily +- Use `.mode csv` in interactive mode (doesn't lock) + +```bash +# Interactive mode (doesn't lock DB) +sqlite3 collector-db/accounts.db +.mode csv +.headers on +.output benchmark-data/accounts.csv +SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000; +.quit +``` + +### "no such table: accounts" + +Check table name: + +```bash +sqlite3 collector-db/accounts.db ".tables" +``` + +### "column not found" + +Check column names: + +```bash +sqlite3 collector-db/accounts.db ".schema accounts" +``` + +### Empty CSV + +Check if database has data: + +```bash +sqlite3 collector-db/accounts.db "SELECT COUNT(*) FROM accounts;" +``` + +--- + +## Pro Tips + +### 1. Export Only Unique Values + +```bash +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT DISTINCT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/accounts.csv +``` + +### 2. Sample from Different Cycles + +```bash +sqlite3 -header -csv collector-db/transactions.db " + SELECT txId + FROM transactions + WHERE cycleNumber IN ( + SELECT DISTINCT cycleNumber + FROM transactions + ORDER BY RANDOM() + LIMIT 10 + ) + LIMIT 100000; +" > benchmark-data/transactions-multi-cycle.csv +``` + +### 3. Export to JSON for Other Tools + +```bash +sqlite3 -json collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/accounts.json +``` + +### 4. Stratified Sampling + +```bash +# 100 accounts from each of 5 cycles +sqlite3 -header -csv collector-db/accounts.db " + WITH cycles AS ( + SELECT DISTINCT cycleNumber + FROM accounts + ORDER BY cycleNumber DESC + LIMIT 5 + ) + SELECT a.accountId + FROM accounts a + INNER JOIN cycles c ON a.cycleNumber = c.cycleNumber + GROUP BY a.cycleNumber + HAVING COUNT(*) <= 100 + ORDER BY RANDOM() + LIMIT 100000; +" > benchmark-data/accounts-stratified.csv +``` + +--- + +## Performance Comparison + +| Method | Speed | Complexity | Node.js Required | +| ----------------------------- | -------------- | ---------- | ---------------- | +| SQLite3 CLI | โšกโšกโšก Fastest | Simple | โŒ No | +| npm run benchmark:export-data | โšกโšก Fast | Easy | โœ… Yes | +| Custom Script | โšก Medium | Complex | โœ… Yes | + +**Recommendation:** Use SQLite3 CLI for quick exports, npm script for automated workflows. + +--- + +## Summary + +**Quick export (copy-paste ready):** + +```bash +mkdir -p benchmark-data + +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/accounts.csv + +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/transactions.csv + +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT 100000;" \ + > benchmark-data/receipts.csv + +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles ORDER BY counter DESC LIMIT 100000;" \ + > benchmark-data/cycles.csv + +echo "โœ“ Data exported to benchmark-data/" +``` + +**Then use with Artillery:** + +```bash +artillery run benchmark/artillery-accounts.yml +``` + +Perfect for CI/CD, cron jobs, or quick manual testing! ๐Ÿš€ diff --git a/benchmark/QUICKSTART.md b/benchmark/QUICKSTART.md new file mode 100644 index 0000000..baedb17 --- /dev/null +++ b/benchmark/QUICKSTART.md @@ -0,0 +1,261 @@ +# Benchmark Suite - Quick Start Guide + +## What Was Created + +A comprehensive API load testing suite with real database data: + +``` +benchmark/ +โ”œโ”€โ”€ README.md # Full documentation +โ”œโ”€โ”€ QUICKSTART.md # This file +โ”œโ”€โ”€ data-collector.ts # Collects real IDs from your database +โ”œโ”€โ”€ test-suite.ts # Main benchmark suite (25+ tests) +โ””โ”€โ”€ example.ts # Simple example for quick tests +``` + +## Prerequisites + +โœ… **Server must be running** +```bash +npm run server +``` + +โœ… **Database must have data** +```bash +npm run collector +``` + +## Run Your First Benchmark + +### Option 1: Simple Example (Recommended First) + +Test the 3 most critical endpoints in 30 seconds: + +```bash +npm run benchmark:example +``` + +**Output:** +``` +Quick Benchmark - Testing Critical Endpoints + +Collecting test data from database... + โœ“ Collected 50 account IDs + โœ“ Collected 50 transaction IDs + +Testing: GET /api/transaction?txId=... +Transaction Query Results: + Requests/sec: 1523.45 + Latency p99: 198.23ms + +Testing: GET /api/account?accountId=... +Account Query Results: + Requests/sec: 1845.67 + Latency p99: 145.89ms + +Testing: GET /totalData +Total Data Results: + Requests/sec: 2145.23 + Latency p99: 89.45ms + +โœ… All endpoints performing well +``` + +### Option 2: Quick Test Suite (5-10 minutes) + +Test transactions and accounts only: + +```bash +npm run benchmark:quick +``` + +### Option 3: Full Test Suite (20-30 minutes) + +Run all 25+ tests across all endpoints: + +```bash +npm run benchmark +``` + +## Understanding the Results + +### Good Performance โœ… +``` +Requests/sec: 1523.45 # > 1000 is good for query endpoints +Latency (avg): 65.32ms # Average response time +Latency (p95): 142.67ms # 95% of requests faster than this +Latency (p99): 198.23ms # 99% faster (should be < 500ms) +Throughput: 2.45 MB/s # Data transfer rate +Errors: 0 # Should always be 0 +``` + +### Performance Issues โš ๏ธ +``` +Requests/sec: 124.45 # Too low - investigate bottleneck +Latency (p99): 2845.67ms # Too high - needs optimization +Errors: 15 # Server errors - check logs +``` + +## Common Commands + +```bash +# 1. Verify database has data +npm run benchmark:collect-data + +# 2. Quick 30-second test +npm run benchmark:example + +# 3. Test specific categories +npm run benchmark:quick # Transactions & accounts +npm run benchmark:stats # Statistics endpoints +npm run benchmark:cycles # Cycle endpoints + +# 4. Full benchmark suite +npm run benchmark + +# 5. Custom options +npm run benchmark -- --url=http://127.0.0.1:3000 # Different server +npm run benchmark -- --sample=200 # More test data +npm run benchmark -- transaction # Filter by keyword +``` + +## What Each Test Does + +### Real Data Testing +The benchmark suite: +1. **Connects to your database** and collects 100000 real IDs +2. **Randomizes requests** to simulate different users +3. **Tests actual query patterns** (by ID, by range, paginated, etc.) +4. **Measures performance** under realistic load + +### Test Coverage +- โœ… 25+ different endpoint configurations +- โœ… 50-150 concurrent connections per test +- โœ… 10-30 second duration per test +- โœ… Real accountIds, txIds, cycle numbers +- โœ… Cached and uncached queries + +## Interpreting Results + +### Summary Table +``` +BENCHMARK SUMMARY +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +Test Name | Req/s | Avg(ms) | P95(ms) +โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ +Transaction - By TxId | 1523.45 | 65.32 | 142.67 +Account - By ID | 1845.67 | 54.23 | 145.89 +Total Data Endpoint | 2145.23 | 46.32 | 89.45 +Stats - Validator (cached) | 3245.89 | 30.12 | 67.23 +``` + +**What to look for:** +1. **Req/s** - Higher is better (target > 1000 for queries) +2. **P95/P99** - These matter more than average (target < 200ms) +3. **Consistency** - Similar numbers across runs means stable +4. **Errors** - Should always be 0 + +### Performance Rankings + +After tests complete, you'll see: + +``` +โš ๏ธ Slowest endpoints (p99 latency): + 1. Transaction - By TxId with Balance Changes: 456.78ms + 2. Account - Paginated: 234.56ms + 3. Cycle Info - Range Query: 198.23ms + +โœ“ Highest throughput endpoints: + 1. Port Endpoint: 5234.12 req/s + 2. Total Data Endpoint: 2145.23 req/s + 3. Stats - Validator (cached): 1845.67 req/s +``` + +## Troubleshooting + +### "No test data found" +```bash +โš ๏ธ Warning: No accounts found in database +``` +**Solution:** Run collector first: `npm run collector` + +### "Connection refused" +```bash +Error: connect ECONNREFUSED 127.0.0.1:6001 +``` +**Solution:** Start server: `npm run server` + +### "Too many errors" +```bash +Errors: 150 +``` +**Solution:** +- Check server logs for errors +- Verify database is not locked +- Reduce connections: Edit test to use fewer concurrent connections + +## Next Steps + +1. **Establish Baseline** + ```bash + npm run benchmark > baseline-$(date +%Y%m%d).txt + ``` + +2. **Make Optimizations** + - Add database indexes + - Enable caching + - Optimize queries + +3. **Re-run Benchmarks** + ```bash + npm run benchmark > after-optimization-$(date +%Y%m%d).txt + ``` + +4. **Compare Results** + ```bash + diff baseline-*.txt after-optimization-*.txt + ``` + +## Example Optimization Workflow + +```bash +# Day 1: Establish baseline +npm run server & +npm run benchmark > results-day1.txt + +# Day 2: After adding indexes +npm run server & +npm run benchmark > results-day2.txt + +# Compare +grep "Transaction - By TxId" results-day1.txt +grep "Transaction - By TxId" results-day2.txt +``` + +## Need Help? + +- ๐Ÿ“– Full docs: [benchmark/README.md](./README.md) +- ๐Ÿ”ง Customize tests: Edit [benchmark/test-suite.ts](./test-suite.ts) +- ๐Ÿ’ก Simple example: See [benchmark/example.ts](./example.ts) + +## Quick Reference Card + +| Command | Duration | Tests | Use Case | +|---------|----------|-------|----------| +| `npm run benchmark:example` | 30s | 3 | Quick health check | +| `npm run benchmark:quick` | 5-10m | ~10 | Pre-deployment test | +| `npm run benchmark:stats` | 5-10m | 8 | Stats performance | +| `npm run benchmark` | 20-30m | 25+ | Full analysis | +| `npm run benchmark:collect-data` | 10s | 0 | Verify database | + +--- + +**Ready to start?** + +```bash +# Make sure server is running +npm run server + +# In another terminal, run quick benchmark +npm run benchmark:example +``` diff --git a/benchmark/README.md b/benchmark/README.md new file mode 100644 index 0000000..dfb8766 --- /dev/null +++ b/benchmark/README.md @@ -0,0 +1,383 @@ +# Liberdus Explorer API Benchmark Suite + +Comprehensive load testing and benchmarking suite for the Liberdus Explorer API using real database data. + +## Features + +- **Real Data Testing**: Uses actual accountIds, txIds, cycle numbers, and markers from your database +- **Comprehensive Coverage**: Tests all major API endpoints (accounts, transactions, cycles, receipts, stats) +- **Realistic Load Patterns**: Randomized queries simulating real user behavior +- **Detailed Metrics**: Requests/sec, latency (avg, p50, p95, p99), throughput, errors +- **Flexible Execution**: Run all tests or filter by category + +## Prerequisites + +1. **Server must be running**: Start your explorer server before running benchmarks + + ```bash + npm run server + ``` + +2. **Database must have data**: Ensure your collector has gathered some data + + ```bash + npm run collector + ``` + +3. **Install dependencies**: + ```bash + npm install + ``` + +## Quick Start + +### 1. Collect Test Data + +First, verify your database has data and collect sample IDs: + +```bash +npm run benchmark:collect-data +``` + +This will display sample data like: + +``` +โœ“ Collected 100000 account IDs +โœ“ Collected 100000 transaction IDs +โœ“ Collected 100000 receipt IDs +โœ“ Collected 100000 cycle numbers +``` + +### 2. Run Benchmarks + +Run all benchmark tests: + +```bash +npm run benchmark +``` + +Run quick benchmark (just transactions and accounts): + +```bash +npm run benchmark:quick +``` + +Run specific categories: + +```bash +npm run benchmark:stats # Only stats endpoints +npm run benchmark:cycles # Only cycle endpoints +``` + +## Available Tests + +### Basic Endpoints + +- `/totalData` - Aggregate data endpoint +- `/port` - Simple endpoint test + +### Cycle Endpoints + +- Latest cycles (count=10, count=50) +- Query by specific cycle number +- Query by cycle marker +- Cycle range queries + +### Account Endpoints + +- Latest accounts +- Query by specific account ID +- Paginated queries + +### Transaction Endpoints + +- Latest transactions +- Query by transaction ID (with/without balance changes) +- Query by account ID +- Transaction statistics + +### Receipt Endpoints + +- Latest receipts +- Query by receipt/transaction ID + +### Stats Endpoints + +- Validator stats +- Transaction stats (cycle-based and daily) +- Account stats +- Coin stats +- Network stats + +## Configuration + +### Command Line Options + +```bash +# Custom server URL +npm run benchmark -- --url=http://127.0.0.1:3000 + +# Custom sample size (number of IDs to collect) +npm run benchmark -- --sample=200 + +# Custom delay between tests (milliseconds) +npm run benchmark -- --delay=3000 + +# Filter tests by keyword +npm run benchmark -- transaction # Only tests with "transaction" in name +npm run benchmark -- account stats # Tests with "account" OR "stats" in name +``` + +### Combining Options + +```bash +npm run benchmark -- --url=http://127.0.0.1:6001 --sample=150 --delay=2000 transaction +``` + +## Understanding Results + +### Individual Test Output + +``` +Running: Transaction - By TxId +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +โœ“ Results: + Requests/sec: 1523.45 + Latency (avg): 65.32ms + Latency (p50): 58.12ms + Latency (p95): 142.67ms + Latency (p99): 198.23ms + Throughput: 2.45 MB/s + Errors: 0 + Timeouts: 0 +``` + +**Key Metrics:** + +- **Requests/sec**: How many requests the server can handle per second +- **Latency (avg)**: Average response time +- **Latency (p95)**: 95% of requests complete under this time +- **Latency (p99)**: 99% of requests complete under this time (important for SLAs) +- **Throughput**: Data transfer rate +- **Errors**: Failed requests (should be 0) + +### Summary Report + +After all tests complete, you'll see: + +``` +BENCHMARK SUMMARY +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +Test Name | Req/s | Avg(ms) | P95(ms) | P99(ms) | Errors +โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ +Total Data Endpoint | 2145.23 | 46.32 | 89.45 | 125.67 | 0 +Transaction - By TxId | 1523.45 | 65.32 | 142.67 | 198.23 | 0 +... +``` + +**Performance Insights:** + +- Endpoints sorted by various metrics +- Slowest endpoints (p99 latency) +- Highest throughput endpoints +- Total requests processed +- Error summary + +## Interpreting Results + +### Good Performance Indicators + +- โœ… Requests/sec > 1000 for simple queries +- โœ… p95 latency < 200ms +- โœ… p99 latency < 500ms +- โœ… Zero errors +- โœ… Consistent performance across runs + +### Performance Issues + +- โš ๏ธ p99 latency > 1000ms - May need optimization +- โš ๏ธ Errors > 0 - Server errors or timeouts +- โš ๏ธ Low throughput on simple endpoints - Potential bottleneck +- โš ๏ธ High variance between p50 and p99 - Inconsistent performance + +### Common Bottlenecks + +1. **Database queries** - Check indexes on frequently queried fields +2. **Large result sets** - Consider pagination limits +3. **Cache misses** - Verify cache is working for stats endpoints +4. **Memory pressure** - Monitor Node.js heap usage +5. **Rate limiting** - May affect high-concurrency tests + +## Optimization Tips + +### 1. Database Indexes + +Ensure indexes exist on: + +- `accounts.accountId` +- `transactions.txId` +- `transactions.accountId` +- `cycles.counter` +- `cycles.marker` +- `receipts.receiptId` + +### 2. Caching + +The test suite includes cached endpoints: + +- `/api/stats/validator?count=1000&responseType=array` (cached) +- Coin stats (cached) + +Monitor cache hit rates in benchmark results. + +### 3. Connection Pooling + +Check SQLite connection configuration in `src/storage/`. + +### 4. Rate Limiting + +The server has rate limiting enabled. For benchmarks, you may want to: + +- Disable rate limiting temporarily +- Run from localhost (allowed by default) +- Adjust limits in `src/server.ts:125-129` + +## Advanced Usage + +### Programmatic Usage + +```typescript +import { runBenchmarkSuite } from './benchmark/test-suite' + +const results = await runBenchmarkSuite({ + baseUrl: 'http://127.0.0.1:6001', + delayBetweenTests: 5000, + testsToRun: ['transaction', 'account'], + sampleSize: 100, +}) + +// Process results +results.forEach((result) => { + if (result.latencyP99 > 500) { + console.warn(`Slow endpoint: ${result.name}`) + } +}) +``` + +### Custom Test Creation + +Add your own tests to `benchmark/test-suite.ts`: + +```typescript +{ + name: 'My Custom Test', + urlGenerator: (data) => { + const accountId = getRandomItem(data.accountIds) + return `/api/my-endpoint?accountId=${accountId}` + }, + connections: 100, + duration: 30, + description: 'Tests my custom endpoint' +} +``` + +## Continuous Integration + +### GitHub Actions Example + +```yaml +- name: Run API Benchmarks + run: | + npm run server & + sleep 10 + npm run benchmark:quick + kill %1 +``` + +### Performance Regression Detection + +Save benchmark results and compare: + +```bash +npm run benchmark > results-$(date +%Y%m%d).txt +``` + +## Troubleshooting + +### Server Not Responding + +``` +Error: connect ECONNREFUSED 127.0.0.1:6001 +``` + +**Solution**: Ensure the server is running: `npm run server` + +### No Test Data Found + +``` +โš ๏ธ Warning: No accounts found in database +``` + +**Solution**: Run the collector first: `npm run collector` + +### High Error Rates + +``` +Errors: 150 +``` + +**Solution**: + +- Check server logs for errors +- Reduce concurrent connections +- Increase test duration +- Check database connection limits + +### Rate Limiting Errors + +``` +Too Many Requests (429) +``` + +**Solution**: + +- Run from localhost (automatically allowed) +- Adjust rate limits in server configuration +- Reduce concurrent connections + +## Best Practices + +1. **Warm-up**: Run a quick test first to warm up the server +2. **Baseline**: Establish baseline performance metrics +3. **Isolation**: Run benchmarks on a dedicated machine/environment +4. **Consistency**: Use the same test parameters for comparisons +5. **Monitoring**: Monitor server resources during tests +6. **Documentation**: Document any configuration changes + +## Example Workflow + +```bash +# 1. Start server +npm run server + +# 2. In another terminal, verify data +npm run benchmark:collect-data + +# 3. Run quick test to warm up +npm run benchmark:quick + +# 4. Run full benchmark suite +npm run benchmark + +# 5. Run specific category if issues found +npm run benchmark:stats + +# 6. Save results +npm run benchmark > benchmark-results-$(date +%Y%m%d).txt +``` + +## Resources + +- [autocannon documentation](https://github.com/mcollina/autocannon) +- [Fastify performance guide](https://www.fastify.io/docs/latest/Guides/Performance/) +- [SQLite optimization](https://www.sqlite.org/optoverview.html) diff --git a/benchmark/SUMMARY.md b/benchmark/SUMMARY.md new file mode 100644 index 0000000..033074f --- /dev/null +++ b/benchmark/SUMMARY.md @@ -0,0 +1,443 @@ +# Benchmark Suite - Implementation Summary + +## ๐ŸŽ‰ What's Been Added + +A production-ready API load testing suite that uses **real data from your database** to benchmark all major endpoints. + +### Files Created + +``` +benchmark/ +โ”œโ”€โ”€ README.md # Comprehensive documentation (400+ lines) +โ”œโ”€โ”€ QUICKSTART.md # Quick start guide for immediate use +โ”œโ”€โ”€ SUMMARY.md # This file +โ”œโ”€โ”€ data-collector.ts # Fetches real accountIds, txIds, etc. from DB +โ”œโ”€โ”€ test-suite.ts # Main benchmark suite with 25+ tests +โ””โ”€โ”€ example.ts # Simple example for quick testing +``` + +### NPM Scripts Added + +```json +{ + "benchmark": "Full test suite (25+ tests, ~20-30 min)", + "benchmark:collect-data": "Verify database has data", + "benchmark:quick": "Quick test (transactions & accounts only)", + "benchmark:example": "Simple 3-endpoint test (~30 seconds)", + "benchmark:stats": "Test statistics endpoints only", + "benchmark:cycles": "Test cycle endpoints only" +} +``` + +## ๐Ÿš€ Quick Start (3 Steps) + +### 1. Make sure server is running + +```bash +npm run server +``` + +### 2. Run a quick benchmark + +```bash +# In another terminal +npm run benchmark:example +``` + +### 3. View results + +``` +โœ“ Results: + Requests/sec: 1523.45 + Latency (avg): 65.32ms + Latency (p95): 142.67ms + Latency (p99): 198.23ms + Throughput: 2.45 MB/s + Errors: 0 +``` + +## ๐Ÿ’ก Key Features + +### Real Data Testing + +- โœ… Queries actual `accountId`, `txId`, `receiptId` from your database +- โœ… Tests real cycle numbers and markers +- โœ… Randomizes data to avoid cache effects +- โœ… Simulates realistic user query patterns + +### Comprehensive Coverage + +Tests all major endpoints: + +- `/totalData` - Aggregate stats +- `/api/cycleinfo` - By count, number, marker, range +- `/api/account` - By ID, paginated, by type +- `/api/transaction` - By ID, by account, with balance changes +- `/api/receipt` - By ID, by cycle range +- `/api/stats/*` - Validator, transaction, account, coin, network stats + +### Performance Metrics + +- **Requests per second** - Server throughput +- **Latency** - Average, p50, p95, p99 percentiles +- **Throughput** - MB/s data transfer +- **Error rates** - Failed requests and timeouts +- **Comparative analysis** - Slowest vs fastest endpoints + +## ๐Ÿ“Š What Gets Tested + +### Sample Test Breakdown + +| Category | Tests | Description | +| ---------------- | ----- | ----------------------------------------------- | +| **Cycles** | 5 | Latest, by number, by marker, range queries | +| **Accounts** | 3 | By ID, latest, paginated | +| **Transactions** | 6 | By ID, by account, with/without balance changes | +| **Receipts** | 2 | By ID, latest | +| **Stats** | 8 | Validator, transaction, account, coin, network | +| **Basic** | 2 | Port, total data | + +**Total: 25+ test configurations** + +### Load Parameters + +- **Connections**: 50-150 concurrent (per test) +- **Duration**: 10-30 seconds per test +- **Sample size**: 100000 real IDs from database +- **Randomization**: Each request uses different IDs + +## ๐ŸŽฏ Use Cases + +### 1. Pre-Deployment Testing + +```bash +# Quick sanity check before deploying +npm run benchmark:quick +``` + +### 2. Performance Monitoring + +```bash +# Weekly performance check +npm run benchmark > weekly-$(date +%Y%m%d).txt +``` + +### 3. Optimization Validation + +```bash +# Before optimization +npm run benchmark > before.txt + +# After adding indexes/caching +npm run benchmark > after.txt + +# Compare +diff before.txt after.txt +``` + +### 4. Bottleneck Identification + +```bash +# Run full suite to identify slow endpoints +npm run benchmark + +# Check the "Slowest endpoints" section +``` + +### 5. Capacity Planning + +```bash +# Test with different connection counts +# Edit test-suite.ts: connections: 500 +npm run benchmark +``` + +## ๐Ÿ“ˆ Expected Performance + +### Good Benchmarks (Reference) + +Based on typical Fastify + SQLite setup: + +| Endpoint Type | Req/s | p99 Latency | +| ---------------------- | --------- | ----------- | +| Simple queries (by ID) | 1000-2000 | < 200ms | +| Aggregate queries | 500-1000 | < 500ms | +| Cached stats | 2000-5000 | < 100ms | +| Basic endpoints | 3000+ | < 50ms | + +### Red Flags ๐Ÿšฉ + +- p99 latency > 1000ms +- Error rate > 0% +- Req/s < 100 for simple queries +- High variance between runs + +## ๐Ÿ”ง Customization + +### Add Custom Tests + +Edit [test-suite.ts](./test-suite.ts:39): + +```typescript +{ + name: 'My Custom Endpoint', + urlGenerator: (data) => { + const txId = getRandomItem(data.txIds) + return `/api/my-endpoint?txId=${txId}` + }, + connections: 100, + duration: 30, + description: 'Tests my custom endpoint' +} +``` + +### Change Test Parameters + +```bash +# Custom server URL +npm run benchmark -- --url=http://127.0.0.1:3000 + +# Larger sample size +npm run benchmark -- --sample=200 + +# Less delay between tests +npm run benchmark -- --delay=2000 + +# Filter specific tests +npm run benchmark -- transaction stats +``` + +### Programmatic Usage + +```typescript +import { runBenchmarkSuite } from './benchmark/test-suite' + +const results = await runBenchmarkSuite({ + baseUrl: 'http://127.0.0.1:6001', + sampleSize: 100000, + testsToRun: ['transaction'], +}) + +// Process results +console.log(results[0].requestsPerSec) +``` + +## ๐Ÿ“š Documentation Structure + +1. **QUICKSTART.md** (this is where most users should start) + + - 5-minute getting started guide + - Common commands + - Troubleshooting + +2. **README.md** (comprehensive reference) + + - Detailed feature explanation + - All configuration options + - Best practices + - CI/CD integration + - Performance optimization tips + +3. **example.ts** (learning by example) + + - Simple, commented code + - Tests 3 critical endpoints + - Easy to modify + +4. **test-suite.ts** (production-ready suite) + - 25+ comprehensive tests + - Real data integration + - Detailed metrics + +## ๐ŸŽ“ Learning Path + +### Beginner + +```bash +1. Read QUICKSTART.md +2. Run: npm run benchmark:example +3. Understand the output +``` + +### Intermediate + +```bash +1. Run: npm run benchmark:quick +2. Analyze slow endpoints +3. Add database indexes +4. Re-run and compare +``` + +### Advanced + +```bash +1. Run: npm run benchmark +2. Customize tests in test-suite.ts +3. Set up CI/CD integration +4. Create performance dashboards +``` + +## ๐Ÿ” Example Output + +### Individual Test + +``` +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +Running: Transaction - By TxId +Description: Query specific transaction by ID (critical path) +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• + +โœ“ Results: + Requests/sec: 1523.45 + Latency (avg): 65.32ms + Latency (p50): 58.12ms + Latency (p95): 142.67ms + Latency (p99): 198.23ms + Throughput: 2.45 MB/s + Errors: 0 + Timeouts: 0 +``` + +### Summary Report + +``` +BENCHMARK SUMMARY +โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• +Test Name | Req/s | P99(ms) +โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ +Transaction - By TxId | 1523.45 | 198.23 +Account - By ID | 1845.67 | 145.89 +Total Data Endpoint | 2145.23 | 89.45 + +Aggregate Statistics: + Total tests run: 25 + Total requests: 342,156 + Average latency: 87.45ms + Total errors: 0 + +โš ๏ธ Slowest endpoints (p99 latency): + 1. Transaction with Balance Changes: 456.78ms + 2. Cycle Range Query: 234.56ms + 3. Account Paginated: 198.23ms + +โœ“ Highest throughput endpoints: + 1. Port Endpoint: 5234.12 req/s + 2. Total Data: 2145.23 req/s + 3. Validator Stats: 1845.67 req/s +``` + +## โš™๏ธ Dependencies Added + +```json +{ + "devDependencies": { + "@types/autocannon": "^7.12.7", + "autocannon": "^7.15.0" + } +} +``` + +**autocannon** - Official Fastify benchmarking tool + +- Fast, accurate HTTP/1.1 benchmarking +- Written in Node.js +- Highly configurable +- Well-maintained by the Fastify team + +## ๐Ÿšฆ Next Steps + +### Immediate (5 minutes) + +```bash +npm run benchmark:example +``` + +### Short Term (today) + +1. Run `npm run benchmark:quick` +2. Identify any slow endpoints (p99 > 500ms) +3. Check database indexes + +### Medium Term (this week) + +1. Run full `npm run benchmark` +2. Establish performance baselines +3. Document expected performance +4. Set up weekly monitoring + +### Long Term + +1. Add to CI/CD pipeline +2. Set performance budgets +3. Create alerts for regressions +4. Dashboard integration + +## ๐Ÿ’ผ Production Recommendations + +### Before Deploying + +```bash +# 1. Run quick test +npm run benchmark:quick + +# 2. Verify no errors +grep "Errors:" results.txt + +# 3. Check p99 latencies +grep "p99:" results.txt +``` + +### Regular Monitoring + +```bash +# Weekly benchmark (cron job) +0 2 * * 0 cd /path/to/explorer && npm run benchmark > /var/log/benchmarks/$(date +\%Y\%m\%d).txt +``` + +### Performance Budgets + +Set thresholds and alert if exceeded: + +- Transaction by ID: p99 < 200ms +- Account by ID: p99 < 200ms +- Stats endpoints: p99 < 500ms +- Error rate: 0% + +## ๐Ÿ†˜ Getting Help + +Issues? Check these in order: + +1. **QUICKSTART.md** - Common problems and solutions +2. **README.md** - Detailed documentation +3. **example.ts** - Simple working example +4. **Server logs** - Check for errors during tests +5. **Database** - Ensure it has data and isn't locked + +## โœ… Checklist + +Before running benchmarks: + +- [ ] Server is running (`npm run server`) +- [ ] Database has data (`npm run collector`) +- [ ] Dependencies installed (`npm install`) +- [ ] TypeScript compiled (`npm run prepare`) + +For best results: + +- [ ] Run on dedicated test environment +- [ ] No other heavy processes running +- [ ] Consistent test conditions +- [ ] Warm up server first (run once, ignore results) + +--- + +**Ready to benchmark? Start here:** + +```bash +npm run benchmark:example +``` + +**Want more details? Read:** + +- [QUICKSTART.md](./QUICKSTART.md) - Quick start guide +- [README.md](./README.md) - Full documentation diff --git a/benchmark/artillery-accounts.yml b/benchmark/artillery-accounts.yml new file mode 100644 index 0000000..55fc25d --- /dev/null +++ b/benchmark/artillery-accounts.yml @@ -0,0 +1,40 @@ +# Artillery load test for Account queries +# Run: artillery run benchmark/artillery-accounts.yml +# Or with custom target: artillery run --target http://127.0.0.1:6001 benchmark/artillery-accounts.yml + +config: + target: "http://127.0.0.1:6001" + phases: + - duration: 60 + arrivalRate: 50 + name: "Warm up" + - duration: 120 + arrivalRate: 100 + name: "Sustained load" + - duration: 60 + arrivalRate: 200 + name: "Spike test" + + # Load test data from CSV + payload: + path: "../benchmark-data/accounts.csv" + fields: + - "accountId" + order: "random" # Pick random rows + skipHeader: true + + # Performance thresholds + ensure: + maxErrorRate: 1 # Max 1% errors + p95: 200 # 95th percentile < 200ms + p99: 500 # 99th percentile < 500ms + +scenarios: + - name: "Query Account by ID" + flow: + - get: + url: "/api/account?accountId={{ accountId }}" + expect: + - statusCode: 200 + - contentType: json + - hasProperty: success diff --git a/benchmark/artillery-combined.yml b/benchmark/artillery-combined.yml new file mode 100644 index 0000000..74712ef --- /dev/null +++ b/benchmark/artillery-combined.yml @@ -0,0 +1,82 @@ +# Artillery load test - Combined Account & Transaction queries +# Run: artillery run benchmark/artillery-combined.yml +# With custom options: artillery run --duration 300 --arrival-rate 150 benchmark/artillery-combined.yml + +config: + target: "http://127.0.0.1:6001" + phases: + - duration: 30 + arrivalRate: 20 + name: "Ramp up" + - duration: 60 + arrivalRate: 50 + name: "Warm up" + - duration: 180 + arrivalRate: 100 + name: "Sustained load" + - duration: 60 + arrivalRate: 200 + name: "Spike test" + - duration: 30 + arrivalRate: 50 + name: "Ramp down" + + # Load test data from CSV + payload: + path: "../benchmark-data/combined.csv" + fields: + - "accountId" + - "txId" + - "receiptId" + - "cycleNumber" + order: "random" + skipHeader: true + + # Performance thresholds + ensure: + maxErrorRate: 1 + p95: 250 + p99: 600 + + # Metrics plugins (optional) + plugins: + metrics-by-endpoint: + # Group metrics by endpoint for detailed analysis + stripQueryString: false + +scenarios: + # 50% of traffic: Transaction queries + - name: "Transaction by ID" + weight: 50 + flow: + - get: + url: "/api/transaction?txId={{ txId }}" + expect: + - statusCode: 200 + + # 30% of traffic: Account queries + - name: "Account by ID" + weight: 30 + flow: + - get: + url: "/api/account?accountId={{ accountId }}" + expect: + - statusCode: 200 + + # 10% of traffic: Receipt queries + - name: "Receipt by ID" + weight: 10 + flow: + - get: + url: "/api/receipt?txId={{ receiptId }}" + expect: + - statusCode: 200 + + # 10% of traffic: Cycle queries + - name: "Cycle by Number" + weight: 10 + flow: + - get: + url: "/api/cycleinfo?cycleNumber={{ cycleNumber }}" + expect: + - statusCode: 200 diff --git a/benchmark/artillery-transactions.yml b/benchmark/artillery-transactions.yml new file mode 100644 index 0000000..d86afec --- /dev/null +++ b/benchmark/artillery-transactions.yml @@ -0,0 +1,48 @@ +# Artillery load test for Transaction queries +# Run: artillery run benchmark/artillery-transactions.yml + +config: + target: "http://127.0.0.1:6001" + phases: + - duration: 60 + arrivalRate: 50 + name: "Warm up" + - duration: 120 + arrivalRate: 100 + name: "Sustained load" + - duration: 60 + arrivalRate: 200 + name: "Spike test" + + # Load test data from CSV + payload: + path: "../benchmark-data/transactions.csv" + fields: + - "txId" + order: "random" + skipHeader: true + + # Performance thresholds + ensure: + maxErrorRate: 1 + p95: 200 + p99: 500 + +scenarios: + - name: "Query Transaction by ID" + weight: 70 + flow: + - get: + url: "/api/transaction?txId={{ txId }}" + expect: + - statusCode: 200 + - contentType: json + + - name: "Query Transaction with Balance Changes" + weight: 30 + flow: + - get: + url: "/api/transaction?txId={{ txId }}&balanceChanges=true" + expect: + - statusCode: 200 + - contentType: json diff --git a/benchmark/autocannon-advanced.ts b/benchmark/autocannon-advanced.ts new file mode 100644 index 0000000..309c582 --- /dev/null +++ b/benchmark/autocannon-advanced.ts @@ -0,0 +1,228 @@ +/** + * Advanced autocannon testing with data from JSON files + * This script loads test data and randomly queries different IDs + * + * Run: npm run benchmark:autocannon-advanced + */ + +import autocannon from 'autocannon' +import * as fs from 'fs' +import * as path from 'path' + +interface TestData { + accountIds: string[] + txIds: string[] + receiptIds: string[] + cycleNumbers: number[] +} + +function loadTestData(): TestData { + const dataPath = path.join(__dirname, '../benchmark-data/test-data.json') + + if (!fs.existsSync(dataPath)) { + console.error('โŒ Test data not found!') + console.error('Run: npm run benchmark:export-data') + process.exit(1) + } + + const data = JSON.parse(fs.readFileSync(dataPath, 'utf-8')) + console.log('โœ“ Loaded test data:') + console.log(` - ${data.accountIds.length} account IDs`) + console.log(` - ${data.txIds.length} transaction IDs`) + console.log(` - ${data.receiptIds.length} receipt IDs`) + console.log(` - ${data.cycleNumbers.length} cycle numbers\n`) + + return data +} + +async function runAccountTest(testData: TestData, baseUrl: string): Promise { + console.log('๐Ÿ”ฅ Test 1: Account Queries (Random IDs)\n') + + let requestCount = 0 + + const result: any = await autocannon({ + url: baseUrl, + connections: 100, + duration: 30, + setupClient: (client: any) => { + client.on('response', () => { + requestCount++ + if (requestCount % 1000 === 0) { + process.stdout.write(`\rRequests sent: ${requestCount}`) + } + }) + }, + requests: [ + { + method: 'GET', + // This function is called for each request, returning different IDs + setupRequest: (req: any) => { + const randomId = testData.accountIds[Math.floor(Math.random() * testData.accountIds.length)] + return { + ...req, + path: `/api/account?accountId=${randomId}`, + } + }, + }, + ], + }) + + console.log('\n\nโœ“ Account Test Results:') + console.log(` Requests/sec: ${result.requests.average.toFixed(2)}`) + console.log(` Latency (avg): ${result.latency.mean.toFixed(2)}ms`) + console.log(` Latency (p95): ${result.latency.p97_5.toFixed(2)}ms`) + console.log(` Latency (p99): ${result.latency.p99.toFixed(2)}ms`) + console.log(` Total requests: ${requestCount}`) + console.log(` Errors: ${result.errors}`) + + return result +} + +async function runTransactionTest(testData: TestData, baseUrl: string): Promise { + console.log('\n\n๐Ÿ”ฅ Test 2: Transaction Queries (Random IDs)\n') + + let requestCount = 0 + + const result: any = await autocannon({ + url: baseUrl, + connections: 100, + duration: 30, + setupClient: (client: any) => { + client.on('response', () => { + requestCount++ + if (requestCount % 1000 === 0) { + process.stdout.write(`\rRequests sent: ${requestCount}`) + } + }) + }, + requests: [ + { + method: 'GET', + setupRequest: (req: any) => { + const randomId = testData.txIds[Math.floor(Math.random() * testData.txIds.length)] + return { + ...req, + path: `/api/transaction?txId=${randomId}`, + } + }, + }, + ], + }) + + console.log('\n\nโœ“ Transaction Test Results:') + console.log(` Requests/sec: ${result.requests.average.toFixed(2)}`) + console.log(` Latency (avg): ${result.latency.mean.toFixed(2)}ms`) + console.log(` Latency (p95): ${result.latency.p97_5.toFixed(2)}ms`) + console.log(` Latency (p99): ${result.latency.p99.toFixed(2)}ms`) + console.log(` Total requests: ${requestCount}`) + console.log(` Errors: ${result.errors}`) + + return result +} + +async function runMixedTest(testData: TestData, baseUrl: string): Promise { + console.log('\n\n๐Ÿ”ฅ Test 3: Mixed Load (50% Accounts, 50% Transactions)\n') + + let requestCount = 0 + let accountRequests = 0 + let txRequests = 0 + + const result: any = await autocannon({ + url: baseUrl, + connections: 100, + duration: 60, + setupClient: (client: any) => { + client.on('response', () => { + requestCount++ + if (requestCount % 1000 === 0) { + process.stdout.write( + `\rRequests: ${requestCount} (Accounts: ${accountRequests}, Txs: ${txRequests})` + ) + } + }) + }, + requests: [ + { + method: 'GET', + setupRequest: (req: any) => { + // 50% chance for each type + if (Math.random() < 0.5) { + const randomId = testData.accountIds[Math.floor(Math.random() * testData.accountIds.length)] + accountRequests++ + return { + ...req, + path: `/api/account?accountId=${randomId}`, + } + } else { + const randomId = testData.txIds[Math.floor(Math.random() * testData.txIds.length)] + txRequests++ + return { + ...req, + path: `/api/transaction?txId=${randomId}`, + } + } + }, + }, + ], + }) + + console.log('\n\nโœ“ Mixed Test Results:') + console.log(` Requests/sec: ${result.requests.average.toFixed(2)}`) + console.log(` Latency (avg): ${result.latency.mean.toFixed(2)}ms`) + console.log(` Latency (p95): ${result.latency.p97_5.toFixed(2)}ms`) + console.log(` Latency (p99): ${result.latency.p99.toFixed(2)}ms`) + console.log(` Total requests: ${requestCount}`) + console.log( + ` Account queries: ${accountRequests} (${((accountRequests / requestCount) * 100).toFixed(1)}%)` + ) + console.log(` Transaction queries: ${txRequests} (${((txRequests / requestCount) * 100).toFixed(1)}%)`) + console.log(` Errors: ${result.errors}`) + + return result +} + +async function main(): Promise { + const baseUrl = process.env.API_URL || 'http://127.0.0.1:6001' + const testType = process.argv[2] || 'all' + + console.log('='.repeat(70)) + console.log('autocannon Advanced Load Test') + console.log('='.repeat(70)) + console.log(`Target: ${baseUrl}`) + console.log(`Test type: ${testType}\n`) + + const testData = loadTestData() + + switch (testType) { + case 'accounts': + await runAccountTest(testData, baseUrl) + break + case 'transactions': + await runTransactionTest(testData, baseUrl) + break + case 'mixed': + await runMixedTest(testData, baseUrl) + break + case 'all': + await runAccountTest(testData, baseUrl) + await new Promise((resolve) => setTimeout(resolve, 5000)) + await runTransactionTest(testData, baseUrl) + await new Promise((resolve) => setTimeout(resolve, 5000)) + await runMixedTest(testData, baseUrl) + break + default: + console.error(`Unknown test type: ${testType}`) + console.error('Valid types: accounts, transactions, mixed, all') + process.exit(1) + } + + console.log('\n' + '='.repeat(70)) + console.log('All tests complete!') + console.log('='.repeat(70)) + process.exit(0) +} + +main().catch((error) => { + console.error('Test failed:', error) + process.exit(1) +}) diff --git a/benchmark/autocannon-cli.sh b/benchmark/autocannon-cli.sh new file mode 100755 index 0000000..b317387 --- /dev/null +++ b/benchmark/autocannon-cli.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# autocannon CLI examples using exported test data +# These examples show how to use autocannon directly from command line + +set -e + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +BASE_URL="${API_URL:-http://127.0.0.1:6001}" +DATA_DIR="benchmark-data" + +echo -e "${BLUE}=== autocannon CLI Load Tests ===${NC}\n" + +# Check if data files exist +if [ ! -d "$DATA_DIR" ]; then + echo "Error: Test data not found. Run: npm run benchmark:export-data" + exit 1 +fi + +# Test 1: Account queries +echo -e "${GREEN}Test 1: Account Queries${NC}" +echo "Reading random account IDs from $DATA_DIR/accounts.csv..." + +# Use a random account ID from the CSV +ACCOUNT_ID=$(tail -n +2 "$DATA_DIR/accounts.csv" | shuf -n 1) +echo "Testing with Account ID: $ACCOUNT_ID" + +autocannon \ + -c 100 \ + -d 30 \ + -m GET \ + "$BASE_URL/api/account?accountId=$ACCOUNT_ID" + +echo "" +echo -e "${GREEN}Test 2: Transaction Queries${NC}" +echo "Reading random transaction IDs from $DATA_DIR/transactions.csv..." + +# Use a random transaction ID from the CSV +TX_ID=$(tail -n +2 "$DATA_DIR/transactions.csv" | shuf -n 1) +echo "Testing with Transaction ID: $TX_ID" + +autocannon \ + -c 100 \ + -d 30 \ + -m GET \ + "$BASE_URL/api/transaction?txId=$TX_ID" + +echo "" +echo -e "${GREEN}Test 3: Mixed Load (Multiple IDs)${NC}" +echo "Testing with 10 random transaction IDs (simulates varied queries)..." + +# Create a temp file with multiple URLs +TEMP_URLS=$(mktemp) +tail -n +2 "$DATA_DIR/transactions.csv" | shuf -n 10 | \ + awk -v base="$BASE_URL" '{print base "/api/transaction?txId=" $1}' > "$TEMP_URLS" + +echo "Generated test URLs:" +cat "$TEMP_URLS" +echo "" + +# Note: autocannon doesn't support multiple URLs from file in CLI mode +# But you can pipe them or use the Node.js script approach below +echo "For multiple URLs, use: npm run benchmark:autocannon-advanced" + +rm "$TEMP_URLS" + +echo "" +echo -e "${BLUE}=== Tests Complete ===${NC}" +echo "" +echo "For advanced multi-URL testing, see:" +echo " - benchmark/autocannon-advanced.ts" +echo " - npm run benchmark:autocannon-advanced" diff --git a/benchmark/data-collector.ts b/benchmark/data-collector.ts new file mode 100644 index 0000000..6d8284b --- /dev/null +++ b/benchmark/data-collector.ts @@ -0,0 +1,139 @@ +import * as Storage from '../src/storage' +import { + AccountDB, + CycleDB, + TransactionDB, + ReceiptDB, +} from '../src/storage' + +export interface TestData { + accountIds: string[] + txIds: string[] + receiptIds: string[] + cycleNumbers: number[] + cycleMarkers: string[] + validAccountId: string + validTxId: string + validReceiptId: string + latestCycle: number +} + +export async function collectTestData(sampleSize = 100000): Promise { + console.log('Collecting test data from database...') + + await Storage.initializeDB() + + const testData: TestData = { + accountIds: [], + txIds: [], + receiptIds: [], + cycleNumbers: [], + cycleMarkers: [], + validAccountId: '', + validTxId: '', + validReceiptId: '', + latestCycle: 0, + } + + try { + // Collect account IDs + console.log('Fetching account IDs...') + const accounts = await AccountDB.queryAccounts({ limit: sampleSize }) + testData.accountIds = accounts.map(acc => acc.accountId).filter(Boolean) + if (testData.accountIds.length > 0) { + testData.validAccountId = testData.accountIds[0] + } + console.log(` โœ“ Collected ${testData.accountIds.length} account IDs`) + + // Collect transaction IDs + console.log('Fetching transaction IDs...') + const transactions = await TransactionDB.queryTransactions({ limit: sampleSize }) + testData.txIds = transactions.map(tx => tx.txId).filter(Boolean) + if (testData.txIds.length > 0) { + testData.validTxId = testData.txIds[0] + } + console.log(` โœ“ Collected ${testData.txIds.length} transaction IDs`) + + // Collect receipt IDs + console.log('Fetching receipt IDs...') + const receipts = await ReceiptDB.queryReceipts({ limit: sampleSize }) + testData.receiptIds = receipts.map(r => r.receiptId).filter(Boolean) + if (testData.receiptIds.length > 0) { + testData.validReceiptId = testData.receiptIds[0] + } + console.log(` โœ“ Collected ${testData.receiptIds.length} receipt IDs`) + + // Collect cycle data + console.log('Fetching cycle data...') + const cycles = await CycleDB.queryLatestCycleRecords(sampleSize) + testData.cycleNumbers = cycles.map(c => c.counter).filter(n => n !== undefined) + testData.cycleMarkers = cycles.map(c => c.cycleMarker as string).filter(Boolean) + + if (testData.cycleNumbers.length > 0) { + testData.latestCycle = Math.max(...testData.cycleNumbers) + } + console.log(` โœ“ Collected ${testData.cycleNumbers.length} cycle numbers`) + console.log(` โœ“ Collected ${testData.cycleMarkers.length} cycle markers`) + console.log(` โœ“ Latest cycle: ${testData.latestCycle}`) + + // Validation + console.log('\nTest data summary:') + console.log(` Accounts: ${testData.accountIds.length}`) + console.log(` Transactions: ${testData.txIds.length}`) + console.log(` Receipts: ${testData.receiptIds.length}`) + console.log(` Cycles: ${testData.cycleNumbers.length}`) + console.log(` Markers: ${testData.cycleMarkers.length}`) + + if (testData.accountIds.length === 0) { + console.warn('โš ๏ธ Warning: No accounts found in database') + } + if (testData.txIds.length === 0) { + console.warn('โš ๏ธ Warning: No transactions found in database') + } + if (testData.cycleNumbers.length === 0) { + console.warn('โš ๏ธ Warning: No cycles found in database') + } + + } catch (error) { + console.error('Error collecting test data:', error) + throw error + } finally { + await Storage.closeDatabase() + } + + return testData +} + +export function getRandomItem(array: T[]): T | null { + if (array.length === 0) return null + return array[Math.floor(Math.random() * array.length)] +} + +export function getRandomItems(array: T[], count: number): T[] { + if (array.length === 0) return [] + const shuffled = [...array].sort(() => 0.5 - Math.random()) + return shuffled.slice(0, Math.min(count, array.length)) +} + +// CLI interface +if (require.main === module) { + const sampleSize = process.argv[2] ? parseInt(process.argv[2]) : 100000 + + collectTestData(sampleSize) + .then(data => { + console.log('\nโœ“ Test data collection complete!') + console.log('\nSample data:') + console.log(` Account ID: ${data.validAccountId}`) + console.log(` Transaction ID: ${data.validTxId}`) + console.log(` Receipt ID: ${data.validReceiptId}`) + console.log(` Latest Cycle: ${data.latestCycle}`) + if (data.cycleMarkers.length > 0) { + console.log(` Sample Marker: ${data.cycleMarkers[0]}`) + } + process.exit(0) + }) + .catch(error => { + console.error('Failed to collect test data:', error) + process.exit(1) + }) +} diff --git a/benchmark/example.ts b/benchmark/example.ts new file mode 100644 index 0000000..de8eebb --- /dev/null +++ b/benchmark/example.ts @@ -0,0 +1,124 @@ +/** + * Example: Quick benchmark of key endpoints + * + * This demonstrates how to run a focused benchmark on critical endpoints + * without running the full test suite. + */ + +import autocannon from 'autocannon' +import { collectTestData } from './data-collector' + +// Configuration +const SAMPLE_SIZE = parseInt(process.env.SAMPLE_SIZE || '100000') +const CONNECTIONS = parseInt(process.env.CONNECTIONS || '100') +const DURATION = parseInt(process.env.DURATION || '30') +const BASE_URL = process.env.API_URL || 'http://127.0.0.1:6001' + +async function quickBenchmark(): Promise { + console.log('Quick Benchmark - Testing Critical Endpoints\n') + console.log(`Configuration:`) + console.log(` Sample size: ${SAMPLE_SIZE} IDs per endpoint`) + console.log(` Connections: ${CONNECTIONS}`) + console.log(` Duration: ${DURATION}s per test`) + console.log(` Target: ${BASE_URL}`) + console.log('') + + // Collect real data from database + const testData = await collectTestData(SAMPLE_SIZE) + + if (!testData.txIds.length || !testData.accountIds.length) { + console.error('โŒ No test data available. Run the collector first.') + process.exit(1) + } + + // Test 1: Transaction by ID (most critical query) + const totalTxIds = testData.txIds.length + console.log(`Testing: GET /api/transaction?txId=... (rotating through ${totalTxIds} different IDs)\n`) + + const txResult: any = await autocannon({ + url: BASE_URL, + connections: CONNECTIONS, + duration: DURATION, + requests: [ + { + method: 'GET', + setupRequest: (req: any) => { + // Pick a different random txId for each request + const randomTxId = testData.txIds[Math.floor(Math.random() * totalTxIds)] + return { + ...req, + path: `/api/transaction?txId=${randomTxId}`, + } + }, + }, + ], + }) + + console.log(`\nTransaction Query Results (rotating ${totalTxIds} IDs):`) + console.log(` Requests/sec: ${txResult.requests.average}`) + console.log(` Latency p99: ${txResult.latency.p99}ms`) + + // Test 2: Account by ID + const totalAccountIds = testData.accountIds.length + console.log( + `\n\nTesting: GET /api/account?accountId=... (rotating through ${totalAccountIds} different IDs)\n` + ) + + const accountResult: any = await autocannon({ + url: BASE_URL, + connections: CONNECTIONS, + duration: DURATION, + requests: [ + { + method: 'GET', + setupRequest: (req: any) => { + // Pick a different random accountId for each request + const randomAccountId = testData.accountIds[Math.floor(Math.random() * totalAccountIds)] + return { + ...req, + path: `/api/account?accountId=${randomAccountId}`, + } + }, + }, + ], + }) + + console.log(`\nAccount Query Results (rotating ${totalAccountIds} IDs):`) + console.log(` Requests/sec: ${accountResult.requests.average}`) + console.log(` Latency p99: ${accountResult.latency.p99}ms`) + + // Test 3: Total Data endpoint + console.log('\n\nTesting: GET /totalData (same endpoint, no ID variation)\n') + + const totalDataResult: any = await autocannon({ + url: `${BASE_URL}/totalData`, + connections: CONNECTIONS, + duration: DURATION, + }) + + console.log('\nTotal Data Results:') + console.log(` Requests/sec: ${totalDataResult.requests.average}`) + console.log(` Latency p99: ${totalDataResult.latency.p99}ms`) + + // Summary + console.log('\n' + '='.repeat(60)) + console.log('QUICK BENCHMARK SUMMARY') + console.log('='.repeat(60)) + console.log(`Transaction query: ${txResult.requests.average.toFixed(0)} req/s`) + console.log(`Account query: ${accountResult.requests.average.toFixed(0)} req/s`) + console.log(`Total data: ${totalDataResult.requests.average.toFixed(0)} req/s`) + + // Performance check + if (txResult.latency.p99 > 500) { + console.log('\nโš ๏ธ Warning: Transaction queries are slow (p99 > 500ms)') + } else { + console.log('\nโœ… All endpoints performing well') + } + + process.exit(0) +} + +quickBenchmark().catch((error) => { + console.error('Benchmark failed:', error) + process.exit(1) +}) diff --git a/benchmark/export-data.sh b/benchmark/export-data.sh new file mode 100755 index 0000000..aa40679 --- /dev/null +++ b/benchmark/export-data.sh @@ -0,0 +1,103 @@ +#!/bin/bash +# Quick data export script using SQLite3 CLI +# Usage: ./benchmark/export-data.sh [num_rows] + +set -e + +# Number of rows to export (default: 100000) +NUM_ROWS="${1:-100000}" +DEST="benchmark-data" + +# Colors +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}Exporting test data using SQLite3 CLI...${NC}" +echo "Rows per table: $NUM_ROWS" +echo "" + +# Create destination directory +mkdir -p "$DEST" + +# Check if databases exist +if [ ! -d "collector-db" ]; then + echo "Error: collector-db/ directory not found" + echo "Make sure you're in the project root directory" + exit 1 +fi + +# Export Accounts +echo -n "Exporting accounts... " +sqlite3 -header -csv collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT $NUM_ROWS;" \ + > "$DEST/accounts.csv" +ACCOUNT_COUNT=$(tail -n +2 "$DEST/accounts.csv" | wc -l | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $ACCOUNT_COUNT rows" + +# Export Transactions +echo -n "Exporting transactions... " +sqlite3 -header -csv collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT $NUM_ROWS;" \ + > "$DEST/transactions.csv" +TX_COUNT=$(tail -n +2 "$DEST/transactions.csv" | wc -l | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $TX_COUNT rows" + +# Export Receipts +echo -n "Exporting receipts... " +sqlite3 -header -csv collector-db/receipts.db \ + "SELECT receiptId FROM receipts ORDER BY RANDOM() LIMIT $NUM_ROWS;" \ + > "$DEST/receipts.csv" +RECEIPT_COUNT=$(tail -n +2 "$DEST/receipts.csv" | wc -l | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $RECEIPT_COUNT rows" + +# Export Cycles +echo -n "Exporting cycles... " +sqlite3 -header -csv collector-db/cycles.db \ + "SELECT counter FROM cycles ORDER BY counter DESC LIMIT $NUM_ROWS;" \ + > "$DEST/cycles.csv" +CYCLE_COUNT=$(tail -n +2 "$DEST/cycles.csv" | wc -l | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $CYCLE_COUNT rows" + +# Export Cycle Markers (for marker-based queries) +echo -n "Exporting cycle markers... " +sqlite3 -csv collector-db/cycles.db \ + "SELECT marker FROM cycles ORDER BY counter DESC LIMIT $NUM_ROWS;" \ + > "$DEST/cycle-markers.csv" +MARKER_COUNT=$(wc -l < "$DEST/cycle-markers.csv" | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $MARKER_COUNT rows" + +# Create combined CSV +echo -n "Creating combined CSV... " +echo "accountId,txId,receiptId,cycleNumber" > "$DEST/combined.csv" +paste -d',' \ + <(tail -n +2 "$DEST/accounts.csv") \ + <(tail -n +2 "$DEST/transactions.csv") \ + <(tail -n +2 "$DEST/receipts.csv") \ + <(tail -n +2 "$DEST/cycles.csv") \ + >> "$DEST/combined.csv" +COMBINED_COUNT=$(tail -n +2 "$DEST/combined.csv" | wc -l | tr -d ' ') +echo -e "${GREEN}โœ“${NC} $COMBINED_COUNT rows" + +# Export as JSON for other tools +echo -n "Creating JSON exports... " +sqlite3 -json collector-db/accounts.db \ + "SELECT accountId FROM accounts ORDER BY RANDOM() LIMIT $NUM_ROWS;" \ + > "$DEST/accounts.json" + +sqlite3 -json collector-db/transactions.db \ + "SELECT txId FROM transactions ORDER BY RANDOM() LIMIT $NUM_ROWS;" \ + > "$DEST/transactions.json" +echo -e "${GREEN}โœ“${NC}" + +echo "" +echo -e "${GREEN}=== Export Complete ===${NC}" +echo "Output directory: $DEST/" +echo "" +echo "Files created:" +ls -lh "$DEST" | tail -n +2 | awk '{printf " %s (%s)\n", $9, $5}' +echo "" +echo "Usage:" +echo " - Artillery: artillery run benchmark/artillery-accounts.yml" +echo " - autocannon: npm run benchmark:autocannon-advanced" +echo " - CLI test: TX_ID=\$(tail -n +2 $DEST/transactions.csv | shuf -n 1) && autocannon http://127.0.0.1:6001/api/transaction?txId=\$TX_ID" diff --git a/benchmark/export-test-data.ts b/benchmark/export-test-data.ts new file mode 100644 index 0000000..a820249 --- /dev/null +++ b/benchmark/export-test-data.ts @@ -0,0 +1,107 @@ +/** + * Export test data to JSON/CSV for CLI load testing tools + */ + +import * as fs from 'fs' +import * as path from 'path' +import { collectTestData } from './data-collector' + +interface ExportedTestData { + accountIds: string[] + txIds: string[] + receiptIds: string[] + cycleNumbers: number[] + cycleMarkers: string[] +} + +async function exportTestData() { + const sampleSize = parseInt(process.argv[2]) || 500 + const outputDir = path.join(__dirname, '../benchmark-data') + + console.log(`Exporting ${sampleSize} test data samples...\n`) + + // Collect data + const testData = await collectTestData(sampleSize) + + // Create output directory + if (!fs.existsSync(outputDir)) { + fs.mkdirSync(outputDir, { recursive: true }) + } + + // Export as JSON + const jsonData: ExportedTestData = { + accountIds: testData.accountIds, + txIds: testData.txIds, + receiptIds: testData.receiptIds, + cycleNumbers: testData.cycleNumbers, + cycleMarkers: testData.cycleMarkers, + } + + const jsonPath = path.join(outputDir, 'test-data.json') + fs.writeFileSync(jsonPath, JSON.stringify(jsonData, null, 2)) + console.log(`\nโœ“ JSON exported to: ${jsonPath}`) + + // Export as CSV for Artillery + const accountsCsv = path.join(outputDir, 'accounts.csv') + const accountsCsvContent = ['accountId', ...testData.accountIds].join('\n') + fs.writeFileSync(accountsCsv, accountsCsvContent) + console.log(`โœ“ Accounts CSV: ${accountsCsv}`) + + const txsCsv = path.join(outputDir, 'transactions.csv') + const txsCsvContent = ['txId', ...testData.txIds].join('\n') + fs.writeFileSync(txsCsv, txsCsvContent) + console.log(`โœ“ Transactions CSV: ${txsCsv}`) + + const receiptsCsv = path.join(outputDir, 'receipts.csv') + const receiptsCsvContent = ['receiptId', ...testData.receiptIds].join('\n') + fs.writeFileSync(receiptsCsv, receiptsCsvContent) + console.log(`โœ“ Receipts CSV: ${receiptsCsv}`) + + const cyclesCsv = path.join(outputDir, 'cycles.csv') + const cyclesCsvContent = ['cycleNumber', ...testData.cycleNumbers].join('\n') + fs.writeFileSync(cyclesCsv, cyclesCsvContent) + console.log(`โœ“ Cycles CSV: ${cyclesCsv}`) + + // Export combined CSV with all data + const combinedCsv = path.join(outputDir, 'combined.csv') + const maxLength = Math.max( + testData.accountIds.length, + testData.txIds.length, + testData.receiptIds.length, + testData.cycleNumbers.length + ) + + const rows = ['accountId,txId,receiptId,cycleNumber'] + for (let i = 0; i < maxLength; i++) { + rows.push( + [ + testData.accountIds[i] || '', + testData.txIds[i] || '', + testData.receiptIds[i] || '', + testData.cycleNumbers[i] || '', + ].join(',') + ) + } + fs.writeFileSync(combinedCsv, rows.join('\n')) + console.log(`โœ“ Combined CSV: ${combinedCsv}`) + + console.log('\n' + '='.repeat(60)) + console.log('Export Summary:') + console.log('='.repeat(60)) + console.log(`Total accounts: ${testData.accountIds.length}`) + console.log(`Total transactions: ${testData.txIds.length}`) + console.log(`Total receipts: ${testData.receiptIds.length}`) + console.log(`Total cycles: ${testData.cycleNumbers.length}`) + console.log(`\nOutput directory: ${outputDir}`) + console.log('\nUse these files with:') + console.log(' - Artillery: artillery run benchmark/artillery-*.yml') + console.log(' - autocannon: See benchmark/autocannon-cli.sh') + console.log('='.repeat(60)) + + process.exit(0) +} + +exportTestData().catch(error => { + console.error('Export failed:', error) + process.exit(1) +}) diff --git a/benchmark/test-suite.ts b/benchmark/test-suite.ts new file mode 100644 index 0000000..c3a8166 --- /dev/null +++ b/benchmark/test-suite.ts @@ -0,0 +1,452 @@ +import autocannon from 'autocannon' +import { collectTestData, getRandomItem, TestData } from './data-collector' + +interface BenchmarkResult { + name: string + url: string + requestsPerSec: number + latencyAvg: number + latencyP50: number + latencyP95: number + latencyP99: number + throughputMBs: number + errors: number + timeouts: number + duration: number + connections: number +} + +interface BenchmarkTest { + name: string + urlGenerator: (data: TestData) => string | string[] + connections: number + duration: number + pipelining?: number + description?: string +} + +const tests: BenchmarkTest[] = [ + // Basic endpoints + { + name: 'Total Data Endpoint', + urlGenerator: () => '/totalData', + connections: 100, + duration: 30, + description: 'Tests the aggregate data endpoint', + }, + { + name: 'Port Endpoint', + urlGenerator: () => '/port', + connections: 50, + duration: 10, + description: 'Simple port endpoint test', + }, + + // Cycle endpoints + { + name: 'Cycle Info - Latest (count=10)', + urlGenerator: () => '/api/cycleinfo?count=10', + connections: 100, + duration: 30, + description: 'Query latest 10 cycles', + }, + { + name: 'Cycle Info - Latest (count=50)', + urlGenerator: () => '/api/cycleinfo?count=50', + connections: 100, + duration: 30, + description: 'Query latest 50 cycles', + }, + { + name: 'Cycle Info - By Number', + urlGenerator: (data) => { + const cycleNum = getRandomItem(data.cycleNumbers) + return cycleNum !== null ? `/api/cycleinfo?cycleNumber=${cycleNum}` : '/api/cycleinfo?count=1' + }, + connections: 100, + duration: 30, + description: 'Query specific cycle by number', + }, + { + name: 'Cycle Info - By Marker', + urlGenerator: (data) => { + const marker = getRandomItem(data.cycleMarkers) + return marker !== null ? `/api/cycleinfo?marker=${marker}` : '/api/cycleinfo?count=1' + }, + connections: 100, + duration: 30, + description: 'Query cycle by marker', + }, + { + name: 'Cycle Info - Range Query', + urlGenerator: (data) => { + if (data.latestCycle > 100) { + const endCycle = data.latestCycle + const startCycle = endCycle - 50 + return `/api/cycleinfo?startCycle=${startCycle}&endCycle=${endCycle}` + } + return '/api/cycleinfo?count=10' + }, + connections: 50, + duration: 30, + description: 'Query cycle range (50 cycles)', + }, + + // Account endpoints + { + name: 'Account - Latest (count=10)', + urlGenerator: () => '/api/account?count=10', + connections: 100, + duration: 30, + description: 'Query latest 10 accounts', + }, + { + name: 'Account - By ID', + urlGenerator: (data) => { + const accountId = getRandomItem(data.accountIds) + return accountId !== null ? `/api/account?accountId=${accountId}` : '/api/account?count=1' + }, + connections: 150, + duration: 30, + description: 'Query specific account by ID (most common query)', + }, + { + name: 'Account - Paginated', + urlGenerator: () => { + const page = Math.floor(Math.random() * 10) + 1 + return `/api/account?page=${page}` + }, + connections: 50, + duration: 30, + description: 'Query accounts with pagination', + }, + + // Transaction endpoints + { + name: 'Transaction - Latest (count=10)', + urlGenerator: () => '/api/transaction?count=10', + connections: 100, + duration: 30, + description: 'Query latest 10 transactions', + }, + { + name: 'Transaction - Latest (count=50)', + urlGenerator: () => '/api/transaction?count=50', + connections: 100, + duration: 30, + description: 'Query latest 50 transactions', + }, + { + name: 'Transaction - By TxId', + urlGenerator: (data) => { + const txId = getRandomItem(data.txIds) + return txId !== null ? `/api/transaction?txId=${txId}` : '/api/transaction?count=1' + }, + connections: 150, + duration: 30, + description: 'Query specific transaction by ID (critical path)', + }, + { + name: 'Transaction - By TxId with Balance Changes', + urlGenerator: (data) => { + const txId = getRandomItem(data.txIds) + return txId !== null ? `/api/transaction?txId=${txId}&balanceChanges=true` : '/api/transaction?count=1' + }, + connections: 100, + duration: 30, + description: 'Query transaction with balance changes (heavier query)', + }, + { + name: 'Transaction - By AccountId', + urlGenerator: (data) => { + const accountId = getRandomItem(data.accountIds) + return accountId !== null ? `/api/transaction?accountId=${accountId}` : '/api/transaction?count=1' + }, + connections: 100, + duration: 30, + description: 'Query transactions for specific account', + }, + { + name: 'Transaction - Total Details', + urlGenerator: () => '/api/transaction?totalTxsDetail=true', + connections: 50, + duration: 20, + description: 'Query transaction statistics by type', + }, + + // Receipt endpoints + { + name: 'Receipt - Latest (count=10)', + urlGenerator: () => '/api/receipt?count=10', + connections: 100, + duration: 30, + description: 'Query latest 10 receipts', + }, + { + name: 'Receipt - By TxId', + urlGenerator: (data) => { + const receiptId = getRandomItem(data.receiptIds) + return receiptId !== null ? `/api/receipt?txId=${receiptId}` : '/api/receipt?count=1' + }, + connections: 100, + duration: 30, + description: 'Query specific receipt by transaction ID', + }, + + // Stats endpoints + { + name: 'Stats - Validator (count=100, array)', + urlGenerator: () => '/api/stats/validator?count=100&responseType=array', + connections: 100, + duration: 30, + description: 'Query validator stats (used for charts)', + }, + { + name: 'Stats - Validator (count=1000, array)', + urlGenerator: () => '/api/stats/validator?count=1000&responseType=array', + connections: 50, + duration: 30, + description: 'Query validator stats (heavy query, cached)', + }, + { + name: 'Stats - Transaction (count=100, array)', + urlGenerator: () => '/api/stats/transaction?count=100&responseType=array', + connections: 100, + duration: 30, + description: 'Query transaction stats', + }, + { + name: 'Stats - Transaction (last 14 days)', + urlGenerator: () => '/api/stats/transaction?last14DaysTxsReport=true&responseType=array', + connections: 100, + duration: 30, + description: 'Query daily transaction report', + }, + { + name: 'Stats - Transaction Summary', + urlGenerator: () => '/api/stats/transaction?fetchTransactionStats=true', + connections: 100, + duration: 20, + description: 'Query transaction statistics summary', + }, + { + name: 'Stats - Account Summary', + urlGenerator: () => '/api/stats/account?fetchAccountStats=true', + connections: 100, + duration: 20, + description: 'Query account statistics summary', + }, + { + name: 'Stats - Coin', + urlGenerator: () => '/api/stats/coin?fetchCoinStats=true', + connections: 100, + duration: 20, + description: 'Query coin statistics', + }, + { + name: 'Stats - Network', + urlGenerator: () => '/api/stats/network', + connections: 100, + duration: 20, + description: 'Query network statistics', + }, +] + +async function runSingleBenchmark( + test: BenchmarkTest, + testData: TestData, + baseUrl: string +): Promise { + console.log(`\n${'='.repeat(70)}`) + console.log(`Running: ${test.name}`) + if (test.description) { + console.log(`Description: ${test.description}`) + } + console.log('='.repeat(70)) + + const url = test.urlGenerator(testData) + const fullUrl = Array.isArray(url) ? url.map((u) => `${baseUrl}${u}`) : `${baseUrl}${url}` + + return new Promise((resolve, reject) => { + const config: autocannon.Options = { + url: Array.isArray(fullUrl) ? fullUrl[0] : fullUrl, + connections: test.connections, + duration: test.duration, + pipelining: test.pipelining || 1, + } + + autocannon(config, (err, result) => { + if (err) { + console.error('Error:', err) + reject(err) + return + } + + const benchResult: BenchmarkResult = { + name: test.name, + url: Array.isArray(url) ? url.join(', ') : url, + requestsPerSec: result.requests.average, + latencyAvg: result.latency.mean, + latencyP50: result.latency.p50, + latencyP95: result.latency.p97_5, + latencyP99: result.latency.p99, + throughputMBs: result.throughput.average / 1024 / 1024, + errors: result.errors, + timeouts: result.timeouts, + duration: test.duration, + connections: test.connections, + } + + console.log(`\nโœ“ Results:`) + console.log(` Requests/sec: ${benchResult.requestsPerSec.toFixed(2)}`) + console.log(` Latency (avg): ${benchResult.latencyAvg.toFixed(2)}ms`) + console.log(` Latency (p50): ${benchResult.latencyP50.toFixed(2)}ms`) + console.log(` Latency (p95): ${benchResult.latencyP95.toFixed(2)}ms`) + console.log(` Latency (p99): ${benchResult.latencyP99.toFixed(2)}ms`) + console.log(` Throughput: ${benchResult.throughputMBs.toFixed(2)} MB/s`) + console.log(` Errors: ${benchResult.errors}`) + console.log(` Timeouts: ${benchResult.timeouts}`) + + resolve(benchResult) + }) + }) +} + +function printSummary(results: BenchmarkResult[]): void { + console.log('\n\n') + console.log('โ•'.repeat(120)) + console.log('BENCHMARK SUMMARY') + console.log('โ•'.repeat(120)) + console.log( + `${'Test Name'.padEnd(50)} | ${'Req/s'.padStart(10)} | ${'Avg(ms)'.padStart(10)} | ${'P95(ms)'.padStart( + 10 + )} | ${'P99(ms)'.padStart(10)} | ${'Errors'.padStart(8)}` + ) + console.log('โ”€'.repeat(120)) + + results.forEach((result) => { + console.log( + `${result.name.padEnd(50)} | ${result.requestsPerSec.toFixed(2).padStart(10)} | ${result.latencyAvg + .toFixed(2) + .padStart(10)} | ${result.latencyP95.toFixed(2).padStart(10)} | ${result.latencyP99 + .toFixed(2) + .padStart(10)} | ${result.errors.toString().padStart(8)}` + ) + }) + + console.log('โ•'.repeat(120)) + + // Calculate aggregate stats + const totalRequests = results.reduce((sum, r) => sum + r.requestsPerSec * r.duration, 0) + const avgLatency = results.reduce((sum, r) => sum + r.latencyAvg, 0) / results.length + const totalErrors = results.reduce((sum, r) => sum + r.errors, 0) + + console.log('\nAggregate Statistics:') + console.log(` Total tests run: ${results.length}`) + console.log(` Total requests: ${totalRequests.toFixed(0)}`) + console.log(` Average latency: ${avgLatency.toFixed(2)}ms`) + console.log(` Total errors: ${totalErrors}`) + + // Identify slowest endpoints + const slowest = [...results].sort((a, b) => b.latencyP99 - a.latencyP99).slice(0, 3) + console.log('\nโš ๏ธ Slowest endpoints (p99 latency):') + slowest.forEach((result, i) => { + console.log(` ${i + 1}. ${result.name}: ${result.latencyP99.toFixed(2)}ms`) + }) + + // Identify highest throughput + const fastest = [...results].sort((a, b) => b.requestsPerSec - a.requestsPerSec).slice(0, 3) + console.log('\nโœ“ Highest throughput endpoints:') + fastest.forEach((result, i) => { + console.log(` ${i + 1}. ${result.name}: ${result.requestsPerSec.toFixed(2)} req/s`) + }) + + console.log('\n') +} + +async function runBenchmarkSuite(options: { + baseUrl?: string + delayBetweenTests?: number + testsToRun?: string[] + sampleSize?: number +}): Promise { + const { + baseUrl = 'http://127.0.0.1:6001', + delayBetweenTests = 5000, + testsToRun, + sampleSize = 100000, + } = options + + console.log('โ•'.repeat(70)) + console.log('LIBERDUS EXPLORER API BENCHMARK SUITE') + console.log('โ•'.repeat(70)) + console.log(`Base URL: ${baseUrl}`) + console.log(`Sample size: ${sampleSize}`) + console.log(`Delay between tests: ${delayBetweenTests}ms`) + console.log('โ•'.repeat(70)) + + // Collect test data + const testData = await collectTestData(sampleSize) + + // Filter tests if specified + let testsToExecute = tests + if (testsToRun && testsToRun.length > 0) { + testsToExecute = tests.filter((test) => + testsToRun.some((name) => test.name.toLowerCase().includes(name.toLowerCase())) + ) + console.log(`\nRunning ${testsToExecute.length} filtered tests`) + } else { + console.log(`\nRunning all ${testsToExecute.length} tests`) + } + + const results: BenchmarkResult[] = [] + + for (let i = 0; i < testsToExecute.length; i++) { + const test = testsToExecute[i] + console.log(`\n[${i + 1}/${testsToExecute.length}]`) + + try { + const result = await runSingleBenchmark(test, testData, baseUrl) + results.push(result) + } catch (error) { + console.error(`Failed to run test: ${test.name}`, error) + } + + // Wait between tests to let the server recover + if (i < testsToExecute.length - 1) { + console.log(`\nWaiting ${delayBetweenTests / 1000}s before next test...`) + await new Promise((resolve) => setTimeout(resolve, delayBetweenTests)) + } + } + + printSummary(results) + + return results +} + +// CLI interface +if (require.main === module) { + const args = process.argv.slice(2) + const baseUrl = args.find((arg) => arg.startsWith('--url='))?.split('=')[1] || 'http://127.0.0.1:6001' + const sampleSize = parseInt(args.find((arg) => arg.startsWith('--sample='))?.split('=')[1] || '100000') + const delayBetweenTests = parseInt(args.find((arg) => arg.startsWith('--delay='))?.split('=')[1] || '5000') + const testsFilter = args.filter((arg) => !arg.startsWith('--')) + + runBenchmarkSuite({ + baseUrl, + delayBetweenTests, + testsToRun: testsFilter.length > 0 ? testsFilter : undefined, + sampleSize, + }) + .then(() => { + console.log('โœ“ Benchmark suite complete!') + process.exit(0) + }) + .catch((error) => { + console.error('Benchmark suite failed:', error) + process.exit(1) + }) +} + +export { runBenchmarkSuite } +export type { BenchmarkResult, BenchmarkTest } diff --git a/package.json b/package.json index 95772f7..453ca93 100644 --- a/package.json +++ b/package.json @@ -18,13 +18,25 @@ "check": "gts check", "clean": "gts clean", "compile": "tsc -p .", + "compile:benchmark": "tsc -p tsconfig.benchmark.json", "fix": "gts fix", "prepare": "npm run compile", "flush": "rm -fr collector-db/ collector-stats-db/ data-logs/", "release:prerelease": "npm run prepare && npm run build && npm version prerelease --preid=prerelease && git push --follow-tags && npm publish --tag prerelease", "release:patch": "npm run prepare && npm run build && npm version patch && git push --follow-tags && npm publish", "release:minor": "npm run prepare && npm run build && npm version minor && git push --follow-tags && npm publish", - "release:major": "npm run prepare && npm run build && npm version major && git push --follow-tags && npm publish" + "release:major": "npm run prepare && npm run build && npm version major && git push --follow-tags && npm publish", + "benchmark": "npm run compile:benchmark && node dist/benchmark/test-suite.js", + "benchmark:collect-data": "npm run compile:benchmark && node dist/benchmark/data-collector.js", + "benchmark:quick": "npm run compile:benchmark && node dist/benchmark/test-suite.js transaction account", + "benchmark:example": "npm run compile:benchmark && node dist/benchmark/example.js", + "benchmark:stats": "npm run compile:benchmark && node dist/benchmark/test-suite.js stats", + "benchmark:cycles": "npm run compile:benchmark && node dist/benchmark/test-suite.js cycle", + "benchmark:autocannon-advanced": "npm run compile:benchmark && node dist/benchmark/autocannon-advanced.js", + "benchmark:export-data": "npm run compile:benchmark && node dist/benchmark/export-test-data.js", + "benchmark:artillery-accounts": "artillery run benchmark/artillery-accounts.yml", + "benchmark:artillery-transactions": "artillery run benchmark/artillery-transactions.yml", + "benchmark:artillery-combined": "artillery run benchmark/artillery-combined.yml" }, "dependencies": { "@fastify/cors": "8.3.0", diff --git a/tsconfig.benchmark.json b/tsconfig.benchmark.json new file mode 100644 index 0000000..9c6d29b --- /dev/null +++ b/tsconfig.benchmark.json @@ -0,0 +1,37 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "dist", + "allowJs": true, + "skipLibCheck": true, + "strict": false, + "noEmitOnError": false, + "esModuleInterop": true, + "resolveJsonModule": true, + "noImplicitReturns": false, + "noEmit": false, + "incremental": false, + "moduleResolution": "node", + "isolatedModules": true, + "jsx": "preserve" + }, + "include": [ + "benchmark/**/*", + "src/storage/**/*", + "src/stats/**/*", + "src/types/**/*", + "src/class/**/*", + "src/config/**/*", + "src/utils/**/*" + ], + "exclude": [ + "node_modules", + "dist", + "src/pages/**/*", + "src/frontend/**/*", + "src/server.ts", + "src/collector.ts", + "src/aggregator.ts" + ] +} From bd535a6685f2ec13d3e387f10f41dca0512b69da Mon Sep 17 00:00:00 2001 From: jairajdev Date: Fri, 12 Dec 2025 19:32:27 +0800 Subject: [PATCH 3/3] feat: enhance benchmark data collection and add selective fields and random data to all db query functions --- .gitignore | 3 ++ benchmark/CLI-QUICKSTART.md | 6 +++- benchmark/CLI-TESTING.md | 50 +++++++++++++++++++++--------- benchmark/QUICKSTART.md | 4 ++- benchmark/README.md | 5 ++- benchmark/SUMMARY.md | 7 +++-- benchmark/data-collector.ts | 16 +++++----- benchmark/export-test-data.ts | 6 ++-- package.json | 6 ++-- src/stats/coinStats.ts | 13 ++++++-- src/stats/dailyAccountStats.ts | 13 ++++++-- src/stats/dailyCoinStats.ts | 13 ++++++-- src/stats/dailyNetworkStats.ts | 13 ++++++-- src/stats/dailyTransactionStats.ts | 24 ++++++++++++-- src/stats/nodeStats.ts | 13 ++++++-- src/stats/totalAccountBalance.ts | 11 +++++-- src/stats/transactionStats.ts | 13 ++++++-- src/stats/validatorStats.ts | 13 ++++++-- src/storage/account.ts | 28 +++++++++++++---- src/storage/cycle.ts | 21 +++++++++++-- src/storage/originalTxData.ts | 24 +++++++++++--- src/storage/receipt.ts | 25 ++++++++++++--- src/storage/transaction.ts | 22 +++++++++++-- 23 files changed, 276 insertions(+), 73 deletions(-) diff --git a/.gitignore b/.gitignore index 4917f60..ec230f6 100644 --- a/.gitignore +++ b/.gitignore @@ -39,9 +39,12 @@ yarn-error.log* next-env.d.ts benchmark-data +artillery-results.json +artillery-results.json.html collector-db collector-stats-db + collector.log server.log aggregator.log diff --git a/benchmark/CLI-QUICKSTART.md b/benchmark/CLI-QUICKSTART.md index 221c621..2512f1b 100644 --- a/benchmark/CLI-QUICKSTART.md +++ b/benchmark/CLI-QUICKSTART.md @@ -10,7 +10,7 @@ Load test your API from the command line using exported test data! npm run benchmark:export-data ``` -This creates `benchmark-data/` with CSV/JSON files containing 100000+ real IDs. +This creates `benchmark-data/` with CSV/JSON files containing 100000+ real IDs (collected in random order). ### 2. Choose Your Tool @@ -30,6 +30,8 @@ npm run benchmark:artillery-transactions npm run benchmark:artillery-combined ``` +**Note**: All npm scripts automatically save results to `artillery-results.json` and generate an HTML report (`artillery-results.json.html`) that opens in your browser. + #### **Option B: autocannon (Advanced, Random IDs)** ```bash @@ -64,6 +66,8 @@ p99: 198 ms Errors: 0 ``` +Plus an interactive HTML report with charts and graphs! + ### autocannon-advanced Output ``` diff --git a/benchmark/CLI-TESTING.md b/benchmark/CLI-TESTING.md index af3ec61..360ae2a 100644 --- a/benchmark/CLI-TESTING.md +++ b/benchmark/CLI-TESTING.md @@ -6,7 +6,7 @@ This guide shows how to run load tests directly from the command line using expo ### 1. Export Test Data -First, export your database IDs to files: +First, export your database IDs to files (collected in random order): ```bash npm run benchmark:export-data @@ -24,6 +24,8 @@ benchmark-data/ โ””โ”€โ”€ combined.csv # All data in one CSV ``` +**Note**: Data is retrieved using `ORDER BY RANDOM()` to ensure diverse sampling and avoid sequential patterns. + ### 2. Choose Your Tool #### **Option A: Artillery (Recommended for CLI)** @@ -49,37 +51,45 @@ npm install -g artillery #### Test Accounts Only ```bash +# Auto-generates HTML report + shows console metrics npm run benchmark:artillery-accounts -# or + +# Or run directly with Artillery CLI (console only, no HTML) artillery run benchmark/artillery-accounts.yml ``` **What it does:** -- Loads 100000 account IDs from CSV +- Loads 100000 account IDs from CSV (collected randomly) - Tests: `GET /api/account?accountId={randomId}` - Phases: 60s warmup โ†’ 120s sustained load โ†’ 60s spike - Connections: 50 โ†’ 100 โ†’ 200 per second +- **npm script auto-generates**: JSON results + HTML report #### Test Transactions Only ```bash +# Auto-generates HTML report + shows console metrics npm run benchmark:artillery-transactions -# or + +# Or run directly with Artillery CLI (console only, no HTML) artillery run benchmark/artillery-transactions.yml ``` **What it does:** -- Loads 100000 transaction IDs from CSV +- Loads 100000 transaction IDs from CSV (collected randomly) - 70% regular queries, 30% with balance changes - Same load phases as accounts +- **npm script auto-generates**: JSON results + HTML report #### Test Combined Workload ```bash +# Auto-generates HTML report + shows console metrics (recommended) npm run benchmark:artillery-combined -# or + +# Or run directly with Artillery CLI (console only, no HTML) artillery run benchmark/artillery-combined.yml ``` @@ -88,6 +98,23 @@ artillery run benchmark/artillery-combined.yml - Uses combined CSV with all data types - 50% transactions, 30% accounts, 10% receipts, 10% cycles - Simulates realistic mixed traffic +- Data collected in random order for diverse sampling +- **npm script auto-generates**: JSON results + HTML report + +### What You Get + +When using the npm scripts (`npm run benchmark:artillery-*`), you automatically get: + +1. **Real-time console metrics** during the test +2. **JSON results** saved to `artillery-results.json` +3. **Interactive HTML report** (`artillery-results.json.html`) with: + - ๐Ÿ“Š Interactive charts and graphs + - ๐Ÿ“ˆ Response time distribution + - ๐ŸŽฏ Latency percentiles visualization + - ๐Ÿ“‹ Detailed scenario breakdowns + - ๐Ÿ”ฅ Request rate over time + +**Note**: You may see a deprecation warning about `artillery report` - it still works fine for local HTML generation. ### Custom Artillery Options @@ -102,6 +129,9 @@ artillery run --target http://production-server.com benchmark/artillery-accounts ```bash # Run for 5 minutes with 200 req/s artillery run --duration 300 --arrival-rate 200 benchmark/artillery-accounts.yml + +# Add --quiet for cleaner output +artillery run --quiet --duration 300 --arrival-rate 200 benchmark/artillery-accounts.yml ``` #### Override Environment Variables @@ -110,14 +140,6 @@ artillery run --duration 300 --arrival-rate 200 benchmark/artillery-accounts.yml API_URL=http://127.0.0.1:3000 artillery run benchmark/artillery-accounts.yml ``` -#### Generate HTML Report - -```bash -artillery run --output results.json benchmark/artillery-combined.yml -artillery report results.json --output report.html -open report.html # Beautiful charts and graphs! -``` - ### Artillery Configuration Edit the YAML files to customize: diff --git a/benchmark/QUICKSTART.md b/benchmark/QUICKSTART.md index baedb17..348430c 100644 --- a/benchmark/QUICKSTART.md +++ b/benchmark/QUICKSTART.md @@ -123,11 +123,13 @@ npm run benchmark -- transaction # Filter by keyword ### Real Data Testing The benchmark suite: -1. **Connects to your database** and collects 100000 real IDs +1. **Connects to your database** and collects 100000 real IDs in random order 2. **Randomizes requests** to simulate different users 3. **Tests actual query patterns** (by ID, by range, paginated, etc.) 4. **Measures performance** under realistic load +**Note**: Data collection uses `ORDER BY RANDOM()` to ensure a diverse sample set and avoid sequential query patterns. + ### Test Coverage - โœ… 25+ different endpoint configurations - โœ… 50-150 concurrent connections per test diff --git a/benchmark/README.md b/benchmark/README.md index dfb8766..62ea80f 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -5,6 +5,7 @@ Comprehensive load testing and benchmarking suite for the Liberdus Explorer API ## Features - **Real Data Testing**: Uses actual accountIds, txIds, cycle numbers, and markers from your database +- **Random Data Selection**: Retrieves data in random order to simulate realistic query patterns - **Comprehensive Coverage**: Tests all major API endpoints (accounts, transactions, cycles, receipts, stats) - **Realistic Load Patterns**: Randomized queries simulating real user behavior - **Detailed Metrics**: Requests/sec, latency (avg, p50, p95, p99), throughput, errors @@ -33,7 +34,7 @@ Comprehensive load testing and benchmarking suite for the Liberdus Explorer API ### 1. Collect Test Data -First, verify your database has data and collect sample IDs: +First, verify your database has data and collect sample IDs (in random order): ```bash npm run benchmark:collect-data @@ -48,6 +49,8 @@ This will display sample data like: โœ“ Collected 100000 cycle numbers ``` +**Note**: Data is collected using `ORDER BY RANDOM()` to ensure diverse query patterns during benchmarking. + ### 2. Run Benchmarks Run all benchmark tests: diff --git a/benchmark/SUMMARY.md b/benchmark/SUMMARY.md index 033074f..9636853 100644 --- a/benchmark/SUMMARY.md +++ b/benchmark/SUMMARY.md @@ -62,7 +62,8 @@ npm run benchmark:example - โœ… Queries actual `accountId`, `txId`, `receiptId` from your database - โœ… Tests real cycle numbers and markers -- โœ… Randomizes data to avoid cache effects +- โœ… Collects data in random order using `ORDER BY RANDOM()` +- โœ… Randomizes data to avoid cache effects and sequential patterns - โœ… Simulates realistic user query patterns ### Comprehensive Coverage @@ -103,8 +104,8 @@ Tests all major endpoints: - **Connections**: 50-150 concurrent (per test) - **Duration**: 10-30 seconds per test -- **Sample size**: 100000 real IDs from database -- **Randomization**: Each request uses different IDs +- **Sample size**: 100000 real IDs from database (collected randomly) +- **Randomization**: Each request uses different IDs from the random sample ## ๐ŸŽฏ Use Cases diff --git a/benchmark/data-collector.ts b/benchmark/data-collector.ts index 6d8284b..f3706a7 100644 --- a/benchmark/data-collector.ts +++ b/benchmark/data-collector.ts @@ -36,36 +36,36 @@ export async function collectTestData(sampleSize = 100000): Promise { } try { - // Collect account IDs + // Collect account IDs (only select accountId field for performance) console.log('Fetching account IDs...') - const accounts = await AccountDB.queryAccounts({ limit: sampleSize }) + const accounts = await AccountDB.queryAccounts({ limit: sampleSize, random: true, select: 'accountId' }) testData.accountIds = accounts.map(acc => acc.accountId).filter(Boolean) if (testData.accountIds.length > 0) { testData.validAccountId = testData.accountIds[0] } console.log(` โœ“ Collected ${testData.accountIds.length} account IDs`) - // Collect transaction IDs + // Collect transaction IDs (only select txId field for performance) console.log('Fetching transaction IDs...') - const transactions = await TransactionDB.queryTransactions({ limit: sampleSize }) + const transactions = await TransactionDB.queryTransactions({ limit: sampleSize, random: true, select: 'txId' }) testData.txIds = transactions.map(tx => tx.txId).filter(Boolean) if (testData.txIds.length > 0) { testData.validTxId = testData.txIds[0] } console.log(` โœ“ Collected ${testData.txIds.length} transaction IDs`) - // Collect receipt IDs + // Collect receipt IDs (only select receiptId field for performance) console.log('Fetching receipt IDs...') - const receipts = await ReceiptDB.queryReceipts({ limit: sampleSize }) + const receipts = await ReceiptDB.queryReceipts({ limit: sampleSize, random: true, select: 'receiptId' }) testData.receiptIds = receipts.map(r => r.receiptId).filter(Boolean) if (testData.receiptIds.length > 0) { testData.validReceiptId = testData.receiptIds[0] } console.log(` โœ“ Collected ${testData.receiptIds.length} receipt IDs`) - // Collect cycle data + // Collect cycle data (only select counter and cycleMarker fields for performance) console.log('Fetching cycle data...') - const cycles = await CycleDB.queryLatestCycleRecords(sampleSize) + const cycles = await CycleDB.queryLatestCycleRecords(sampleSize, true, ['counter', 'cycleMarker']) testData.cycleNumbers = cycles.map(c => c.counter).filter(n => n !== undefined) testData.cycleMarkers = cycles.map(c => c.cycleMarker as string).filter(Boolean) diff --git a/benchmark/export-test-data.ts b/benchmark/export-test-data.ts index a820249..9b98cf6 100644 --- a/benchmark/export-test-data.ts +++ b/benchmark/export-test-data.ts @@ -14,9 +14,9 @@ interface ExportedTestData { cycleMarkers: string[] } -async function exportTestData() { - const sampleSize = parseInt(process.argv[2]) || 500 - const outputDir = path.join(__dirname, '../benchmark-data') +async function exportTestData(): Promise { + const sampleSize = parseInt(process.argv[2]) || 100000 + const outputDir = path.join(__dirname, '../../benchmark-data') console.log(`Exporting ${sampleSize} test data samples...\n`) diff --git a/package.json b/package.json index 453ca93..e062e98 100644 --- a/package.json +++ b/package.json @@ -34,9 +34,9 @@ "benchmark:cycles": "npm run compile:benchmark && node dist/benchmark/test-suite.js cycle", "benchmark:autocannon-advanced": "npm run compile:benchmark && node dist/benchmark/autocannon-advanced.js", "benchmark:export-data": "npm run compile:benchmark && node dist/benchmark/export-test-data.js", - "benchmark:artillery-accounts": "artillery run benchmark/artillery-accounts.yml", - "benchmark:artillery-transactions": "artillery run benchmark/artillery-transactions.yml", - "benchmark:artillery-combined": "artillery run benchmark/artillery-combined.yml" + "benchmark:artillery-accounts": "artillery run --output artillery-results.json benchmark/artillery-accounts.yml && artillery report artillery-results.json", + "benchmark:artillery-transactions": "artillery run --output artillery-results.json benchmark/artillery-transactions.yml && artillery report artillery-results.json", + "benchmark:artillery-combined": "artillery run --output artillery-results.json benchmark/artillery-combined.yml && artillery report artillery-results.json" }, "dependencies": { "@fastify/cors": "8.3.0", diff --git a/src/stats/coinStats.ts b/src/stats/coinStats.ts index d985ee2..a92847c 100644 --- a/src/stats/coinStats.ts +++ b/src/stats/coinStats.ts @@ -57,9 +57,18 @@ export async function bulkInsertCoinsStats(coinStats: CoinStats[]): Promise { +export async function queryLatestCoinStats( + count?: number, + select: keyof CoinStats | (keyof CoinStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM coin_stats ORDER BY cycle DESC LIMIT ${count ? count : 100}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM coin_stats ORDER BY cycle DESC LIMIT ${count ? count : 100}` const coinStats: CoinStats[] = await db.all(coinStatsDatabase, sql) if (config.verbose) console.log('coinStats count', coinStats) return coinStats diff --git a/src/stats/dailyAccountStats.ts b/src/stats/dailyAccountStats.ts index f471391..1881704 100644 --- a/src/stats/dailyAccountStats.ts +++ b/src/stats/dailyAccountStats.ts @@ -91,9 +91,18 @@ export async function bulkInsertAccountsStats(dailyAccountsStats: DbDailyAccount } } -export async function queryLatestDailyAccountStats(count: number): Promise { +export async function queryLatestDailyAccountStats( + count: number, + select: keyof DailyAccountStats | (keyof DailyAccountStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM daily_accounts ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM daily_accounts ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` const dailyAccountsStats: DbDailyAccountStats[] = await db.all(dailyAccountStatsDatabase, sql) if (config.verbose) console.log('dailyAccountStats count', dailyAccountsStats) return dailyAccountsStats diff --git a/src/stats/dailyCoinStats.ts b/src/stats/dailyCoinStats.ts index 5a8b795..4ead075 100644 --- a/src/stats/dailyCoinStats.ts +++ b/src/stats/dailyCoinStats.ts @@ -113,9 +113,18 @@ export async function bulkInsertCoinStats(dailyCoinStats: DbDailyCoinStats[]): P } } -export async function queryLatestDailyCoinStats(count: number): Promise { +export async function queryLatestDailyCoinStats( + count: number, + select: keyof DailyCoinStats | (keyof DailyCoinStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM daily_coin_stats ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM daily_coin_stats ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` const dailyCoinStats: DbDailyCoinStats[] = await db.all(dailyCoinStatsDatabase, sql) if (config.verbose) console.log('dailyCoinStats count', dailyCoinStats) return dailyCoinStats diff --git a/src/stats/dailyNetworkStats.ts b/src/stats/dailyNetworkStats.ts index 28e5fca..4e8a9b2 100644 --- a/src/stats/dailyNetworkStats.ts +++ b/src/stats/dailyNetworkStats.ts @@ -92,9 +92,18 @@ export async function bulkInsertNetworkStats(dailyNetworkStats: DbDailyNetworkSt } } -export async function queryLatestDailyNetworkStats(count: number): Promise { +export async function queryLatestDailyNetworkStats( + count: number, + select: keyof DailyNetworkStats | (keyof DailyNetworkStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM daily_network ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM daily_network ORDER BY dateStartTime DESC ${count ? 'LIMIT ' + count : ''}` const dailyNetworkStats: DbDailyNetworkStats[] = await db.all(dailyNetworkStatsDatabase, sql) if (config.verbose) console.log('dailyNetworkStats count', dailyNetworkStats) return dailyNetworkStats diff --git a/src/stats/dailyTransactionStats.ts b/src/stats/dailyTransactionStats.ts index 7a66aa9..6dcc957 100644 --- a/src/stats/dailyTransactionStats.ts +++ b/src/stats/dailyTransactionStats.ts @@ -78,15 +78,33 @@ export async function bulkInsertTransactionsStats( export async function queryLatestDailyTransactionStats( count: number, - txsWithFee = false + txsWithFee = false, + select: keyof DbDailyTransactionStats | (keyof DbDailyTransactionStats)[] | 'all' = 'all' ): Promise { try { - const sql = `SELECT * FROM daily_transactions ORDER BY dateStartTime DESC ${ + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM daily_transactions ORDER BY dateStartTime DESC ${ count ? 'LIMIT ' + count : '' }` const dailyTransactionsStats: DbDailyTransactionStats[] = await db.all(dailyTransactionStatsDatabase, sql) if (config.verbose) console.log('dailyTransactionStats count', dailyTransactionsStats) - return parseDailyTransactionStats(dailyTransactionsStats, txsWithFee) + + // Only parse JSON fields if they were selected + const shouldParse = select === 'all' || + (Array.isArray(select) && (select.includes('txsByType') || select.includes('txsWithFeeByType'))) || + select === 'txsByType' || select === 'txsWithFeeByType' + + if (shouldParse) { + return parseDailyTransactionStats(dailyTransactionsStats, txsWithFee) + } + + // Return raw results without parsing if JSON fields not selected + return dailyTransactionsStats as unknown as DailyTransactionStats[] } catch (e) { console.log(e) return [] diff --git a/src/stats/nodeStats.ts b/src/stats/nodeStats.ts index 6a0951f..cef62fc 100644 --- a/src/stats/nodeStats.ts +++ b/src/stats/nodeStats.ts @@ -77,9 +77,18 @@ export async function insertOrUpdateNodeStats(nodeStats: NodeStats): Promise { +export async function queryLatestNodeStats( + limit = 100, + select: keyof NodeStats | (keyof NodeStats)[] | 'all' = 'all' +): Promise { try { - const sql = 'SELECT * FROM node_stats ORDER BY timestamp DESC LIMIT ?' + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM node_stats ORDER BY timestamp DESC LIMIT ?` const nodeStats: NodeStats[] = await db.all(nodeStatsDatabase, sql, [limit]) return nodeStats } catch (e) { diff --git a/src/stats/totalAccountBalance.ts b/src/stats/totalAccountBalance.ts index ac3b96e..1de1f9c 100644 --- a/src/stats/totalAccountBalance.ts +++ b/src/stats/totalAccountBalance.ts @@ -48,11 +48,18 @@ export async function insertTotalAccountBalance(totalAccountBalance: TotalAccoun export async function queryTotalAccountBalances( skip = 0, limit = 100, - cycle?: number + cycle?: number, + select: keyof TotalAccountBalance | (keyof TotalAccountBalance)[] | 'all' = 'all' ): Promise { let totalAccountBalances: TotalAccountBalance[] = [] try { - let sql = `SELECT * FROM total_account_balances` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + let sql = `SELECT ${selectClause} FROM total_account_balances` const values: unknown[] = [] if (cycle !== undefined) { sql += ` WHERE cycleNumber = ?` diff --git a/src/stats/transactionStats.ts b/src/stats/transactionStats.ts index bebdabd..af13602 100644 --- a/src/stats/transactionStats.ts +++ b/src/stats/transactionStats.ts @@ -174,9 +174,18 @@ export async function bulkInsertTransactionsStats(transactionsStats: Transaction } } -export async function queryLatestTransactionStats(count: number): Promise { +export async function queryLatestTransactionStats( + count: number, + select: keyof TransactionStats | (keyof TransactionStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM transactions ORDER BY cycle DESC LIMIT ${count ? count : 100}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM transactions ORDER BY cycle DESC LIMIT ${count ? count : 100}` const transactionsStats: TransactionStats[] = await db.all(transactionStatsDatabase, sql) if (config.verbose) console.log('transactionStats count', transactionsStats) return transactionsStats diff --git a/src/stats/validatorStats.ts b/src/stats/validatorStats.ts index a40f8bb..2997e62 100644 --- a/src/stats/validatorStats.ts +++ b/src/stats/validatorStats.ts @@ -77,9 +77,18 @@ export async function bulkInsertValidatorsStats(validators: ValidatorStats[]): P } } -export async function queryLatestValidatorStats(count: number): Promise { +export async function queryLatestValidatorStats( + count: number, + select: keyof ValidatorStats | (keyof ValidatorStats)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM validators ORDER BY cycle DESC LIMIT ${count ? count : 100}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const sql = `SELECT ${selectClause} FROM validators ORDER BY cycle DESC LIMIT ${count ? count : 100}` const validatorsStats: ValidatorStats[] = await db.all(validatorStatsDatabase, sql) if (config.verbose) console.log('validatorStats count', validatorsStats) return validatorsStats diff --git a/src/storage/account.ts b/src/storage/account.ts index 2d1fbef..70dca35 100644 --- a/src/storage/account.ts +++ b/src/storage/account.ts @@ -152,13 +152,21 @@ export async function queryAccountCount(query: QueryAccountCountParams | null = type QueryAccountsParams = QueryAccountCountParams & { skip?: number limit?: number /* default 10, set 0 for all */ + random?: boolean /* if true, returns results in random order */ + select?: keyof Account | (keyof Account)[] | 'all' /* fields to select, defaults to 'all' */ } export async function queryAccounts(query: QueryAccountsParams): Promise { - const { skip = 0, limit = 10, startCycleNumber, endCycleNumber, type } = query + const { skip = 0, limit = 10, startCycleNumber, endCycleNumber, type, random = false, select = 'all' } = query let accounts: DbAccount[] = [] try { - let sql = `SELECT * FROM accounts` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + let sql = `SELECT ${selectClause} FROM accounts` const values: unknown[] = [] if (type) { sql = db.updateSqlStatementClause(sql, values) @@ -170,7 +178,9 @@ export async function queryAccounts(query: QueryAccountsParams): Promise { - if (account.data) account.data = StringUtils.safeJsonParse(account.data) - }) + // Only parse data field if it was selected + const shouldParseData = select === 'all' || + (Array.isArray(select) && select.includes('data')) || + select === 'data' + if (shouldParseData) { + accounts.forEach((account: DbAccount) => { + if (account.data) account.data = StringUtils.safeJsonParse(account.data) + }) + } } catch (e) { console.log(e) } diff --git a/src/storage/cycle.ts b/src/storage/cycle.ts index 1bb1f43..32e2f24 100644 --- a/src/storage/cycle.ts +++ b/src/storage/cycle.ts @@ -120,11 +120,26 @@ export async function insertOrUpdateCycle(cycle: Cycle): Promise { } } -export async function queryLatestCycleRecords(count: number): Promise { +export async function queryLatestCycleRecords( + count: number, + random = false, + select: keyof Cycle | (keyof Cycle)[] | 'all' = 'all' +): Promise { try { - const sql = `SELECT * FROM cycles ORDER BY counter DESC LIMIT ${count}` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + const orderBy = random ? 'RANDOM()' : 'counter DESC' + const sql = `SELECT ${selectClause} FROM cycles ORDER BY ${orderBy} LIMIT ${count}` const cycleRecords = (await db.all(cycleDatabase, sql)) as DbCycle[] - if (cycleRecords.length > 0) { + // Only parse cycleRecord field if it was selected + const shouldParseCycleRecord = select === 'all' || + (Array.isArray(select) && select.includes('cycleRecord')) || + select === 'cycleRecord' + if (cycleRecords.length > 0 && shouldParseCycleRecord) { cycleRecords.forEach((cycleRecord: DbCycle) => { if (cycleRecord.cycleRecord) cycleRecord.cycleRecord = StringUtils.safeJsonParse(cycleRecord.cycleRecord) diff --git a/src/storage/originalTxData.ts b/src/storage/originalTxData.ts index 50f37b2..a5447ad 100644 --- a/src/storage/originalTxData.ts +++ b/src/storage/originalTxData.ts @@ -121,6 +121,8 @@ type QueryOriginalTxDataCountParams = { type QueryOriginalTxsDataParams = QueryOriginalTxDataCountParams & { skip?: number limit?: number /* default 10, set 0 for all */ + random?: boolean /* if true, returns results in random order */ + select?: keyof OriginalTxData | (keyof OriginalTxData)[] | 'all' /* fields to select, defaults to 'all' */ } export async function queryOriginalTxDataCount( @@ -160,10 +162,16 @@ export async function queryOriginalTxDataCount( } export async function queryOriginalTxsData(query: QueryOriginalTxsDataParams): Promise { - const { skip = 0, limit = 10, accountId, startCycle, endCycle, txType, afterTimestamp } = query + const { skip = 0, limit = 10, accountId, startCycle, endCycle, txType, afterTimestamp, random = false, select = 'all' } = query let originalTxsData: DbOriginalTxData[] = [] try { - let sql = `SELECT * FROM originalTxsData` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + let sql = `SELECT ${selectClause} FROM originalTxsData` const values: unknown[] = [] if (accountId) { sql = db.updateSqlStatementClause(sql, values) @@ -185,7 +193,9 @@ export async function queryOriginalTxsData(query: QueryOriginalTxsDataParams): P sql += `timestamp>?` values.push(afterTimestamp) } - if (startCycle || endCycle) { + if (random) { + sql += ` ORDER BY RANDOM()` + } else if (startCycle || endCycle) { sql += ` ORDER BY cycle ASC, timestamp ASC` } else { sql += ` ORDER BY cycle DESC, timestamp DESC` @@ -197,7 +207,13 @@ export async function queryOriginalTxsData(query: QueryOriginalTxsDataParams): P sql += ` OFFSET ${skip}` } originalTxsData = (await db.all(originalTxDataDatabase, sql, values)) as DbOriginalTxData[] - originalTxsData.forEach((originalTxData: DbOriginalTxData) => deserializeDbOriginalTxData(originalTxData)) + // Only deserialize originalTxData field if it was selected + const shouldDeserialize = select === 'all' || + (Array.isArray(select) && select.includes('originalTxData')) || + select === 'originalTxData' + if (shouldDeserialize) { + originalTxsData.forEach((originalTxData: DbOriginalTxData) => deserializeDbOriginalTxData(originalTxData)) + } } catch (e) { console.log(e) } diff --git a/src/storage/receipt.ts b/src/storage/receipt.ts index b05d103..80d8276 100644 --- a/src/storage/receipt.ts +++ b/src/storage/receipt.ts @@ -364,19 +364,29 @@ type QueryReceiptCountParams = { type QueryReceiptsParams = QueryReceiptCountParams & { skip?: number limit?: number /* default 10, set 0 for all */ + random?: boolean /* if true, returns results in random order */ + select?: keyof Receipt | (keyof Receipt)[] | 'all' /* fields to select, defaults to 'all' */ } export async function queryReceipts(query: QueryReceiptsParams): Promise { - const { skip = 0, limit = 10, startCycleNumber, endCycleNumber } = query + const { skip = 0, limit = 10, startCycleNumber, endCycleNumber, random = false, select = 'all' } = query let receipts: DbReceipt[] = [] try { - let sql = `SELECT * FROM receipts` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + let sql = `SELECT ${selectClause} FROM receipts` const values: unknown[] = [] if (startCycleNumber || endCycleNumber) { sql += ` WHERE cycle BETWEEN ? AND ?` values.push(startCycleNumber, endCycleNumber) } - if (startCycleNumber || endCycleNumber) { + if (random) { + sql += ` ORDER BY RANDOM()` + } else if (startCycleNumber || endCycleNumber) { sql += ` ORDER BY cycle ASC, timestamp ASC` } else { sql += ` ORDER BY cycle DESC, timestamp DESC` @@ -388,7 +398,14 @@ export async function queryReceipts(query: QueryReceiptsParams): Promise deserializeDbReceipt(receipt)) + // Only deserialize if any serialized fields were selected + const serializableFields = ['tx', 'beforeStates', 'afterStates', 'appReceiptData', 'signedReceipt', 'globalModification'] + const shouldDeserialize = select === 'all' || + (Array.isArray(select) && select.some(field => serializableFields.includes(field as string))) || + serializableFields.includes(select as string) + if (shouldDeserialize) { + receipts.forEach((receipt: DbReceipt) => deserializeDbReceipt(receipt)) + } } catch (e) { console.log(e) } diff --git a/src/storage/transaction.ts b/src/storage/transaction.ts index 5779cfa..d8edb99 100644 --- a/src/storage/transaction.ts +++ b/src/storage/transaction.ts @@ -182,6 +182,8 @@ export async function queryTransactionCount( type QueryTransactionsParams = QueryTransactionCountParams & { skip?: number limit?: number /* default 10, set 0 for all */ + random?: boolean /* if true, returns results in random order */ + select?: keyof Transaction | (keyof Transaction)[] | 'all' /* fields to select, defaults to 'all' */ } export async function queryTransactions(query: QueryTransactionsParams): Promise { @@ -194,10 +196,18 @@ export async function queryTransactions(query: QueryTransactionsParams): Promise endCycleNumber, beforeTimestamp, afterTimestamp, + random = false, + select = 'all', } = query let transactions: DbTransaction[] = [] try { - let sql = `SELECT * FROM transactions` + // Build SELECT clause + let selectClause = '*' + if (select !== 'all') { + const fields = Array.isArray(select) ? select : [select] + selectClause = fields.join(', ') + } + let sql = `SELECT ${selectClause} FROM transactions` const values: unknown[] = [] if (txType) { if (txType === TransactionSearchParams.all) { @@ -229,7 +239,9 @@ export async function queryTransactions(query: QueryTransactionsParams): Promise sql += `timestamp > ?` values.push(afterTimestamp) } - if (beforeTimestamp > 0) { + if (random) { + sql += ` ORDER BY RANDOM()` + } else if (beforeTimestamp > 0) { sql += ` ORDER BY timestamp DESC` } else if (afterTimestamp > 0) { sql += ` ORDER BY timestamp ASC` @@ -246,7 +258,11 @@ export async function queryTransactions(query: QueryTransactionsParams): Promise } transactions = (await db.all(transactionDatabase, sql, values)) as DbTransaction[] // console.log('queryTransactions', sql, values, transactions) - if (transactions.length > 0) { + // Only deserialize data fields if they were selected + const shouldDeserialize = select === 'all' || + (Array.isArray(select) && (select.includes('data') || select.includes('originalTxData'))) || + select === 'data' || select === 'originalTxData' + if (transactions.length > 0 && shouldDeserialize) { transactions.forEach((transaction: DbTransaction) => { deserializeDbTransaction(transaction) })