feat: remove earthly usage for benchmarks #654
Changes from all commits: db8fa2f, 5108107, 221c26d, 7d5aefe, ab6def2, ddeb433, 63cca4c, 5d4d32d, 40321e1, 83da574
New file (@@ -0,0 +1,55 @@):

name: Benchmark comparison
on:
  workflow_dispatch:
    inputs:
      bench:
        description: 'Benchmarks to run'
        required: false
        default: '.'
      parallelism:
        description: 'Number of parallel benchmarks to run'
        required: false
        default: 5
      duration:
        description: 'Duration of each benchmark'
        required: false
        default: '10s'
      count:
        description: 'Number of times to run each benchmark'
        required: false
        default: 1
  pull_request:
    types: [ assigned, opened, synchronize, reopened, labeled ]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  BenchmarkCompare:
    runs-on: "github-001"
    if: contains(github.event.pull_request.labels.*.name, 'benchmarks')
    steps:
      - uses: 'actions/checkout@v4'
        with:
          fetch-depth: 0
      - name: Setup Env
        uses: ./.github/actions/env
        with:
          token: ${{ secrets.NUMARY_GITHUB_TOKEN }}
      - run: >
          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
          develop --impure --command just
          --justfile ./test/performance/justfile
          --working-directory ./test/performance
          compare ${{ inputs.bench }} ${{ inputs.parallelism }} ${{ inputs.duration }} ${{ inputs.count }}
      - run: >
          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
          develop --impure --command just
          --justfile ./test/performance/justfile
          --working-directory ./test/performance
          graphs
      - uses: actions/upload-artifact@v4
        with:
          name: graphs
          path: test/performance/report
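The four workflow_dispatch inputs line up with the knobs Go's benchmark runner already exposes. As a rough illustration of how such inputs are typically consumed, here is a minimal sketch; the flag mapping is an assumption about what the justfile forwards to `go test`, not the repository's actual recipe:

```go
// Hypothetical sketch only: shows how the workflow inputs usually map onto
// Go benchmark flags and testing.B, assuming the just recipe forwards them as
//   bench       -> go test -bench <regexp>   ('.' selects every benchmark)
//   duration    -> -benchtime 10s
//   count       -> -count 1
//   parallelism -> concurrency used inside RunParallel
package performance_test

import "testing"

func BenchmarkCreateTransaction(b *testing.B) {
	b.SetParallelism(5) // would be driven by the `parallelism` input in practice
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// one ledger write per iteration would go here
		}
	})
}
```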
Modified workflow "Benchmark" (@@ -1,43 +1,48 @@). Label-triggered PR benchmarks are handled by the new comparison workflow above; this workflow now runs only for main (and manual dispatch), and the externally started ledger process plus the earthly invocations are replaced by the nix/just recipes:

 name: Benchmark
 on:
   workflow_dispatch:
-  pull_request:
-    types: [ assigned, opened, synchronize, reopened, labeled ]
+    inputs:
+      bench:
+        description: 'Benchmarks to run'
+        required: false
+        default: '.'
+      parallelism:
+        description: 'Number of parallel benchmarks to run'
+        required: false
+        default: 5
+      duration:
+        description: 'Duration of each benchmark'
+        required: false
+        default: '10s'

 concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

 jobs:
   Benchmark:
     runs-on: "github-001"
-    if: contains(github.event.pull_request.labels.*.name, 'benchmarks') || github.ref == 'refs/heads/main'
+    if: github.ref == 'refs/heads/main'
     steps:
       - uses: 'actions/checkout@v4'
         with:
           fetch-depth: 0
-      - run: go build -o /tmp/ledger ./
-      - run: echo "running actions as ${USER}"
-      - run: >
-          /tmp/ledger serve
-          --postgres-uri=postgres://formance:formance@127.0.0.1/ledger
-          --postgres-conn-max-idle-time=120s
-          --postgres-max-open-conns=500
-          --postgres-max-idle-conns=100
-          --experimental-features
-          --otel-metrics-keep-in-memory &
       - name: Setup Env
         uses: ./.github/actions/env
         with:
           token: ${{ secrets.NUMARY_GITHUB_TOKEN }}
       - run: >
-          earthly
-          --allow-privileged
-          ${{ contains(github.event.pull_request.labels.*.name, 'no-cache') && '--no-cache' || '' }}
-          ./test/performance+run --args="-benchtime 10s --ledger.url=http://localhost:3068 --parallelism=5" --locally=yes
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          run ${{ inputs.bench }} ${{ inputs.parallelism }} ${{ inputs.duration }} 1
       - run: >
-          earthly
-          --allow-privileged
-          ${{ contains(github.event.pull_request.labels.*.name, 'no-cache') && '--no-cache' || '' }}
-          ./test/performance+generate-graphs
-      - run: kill -9 $(ps aux | grep "ledger serve"| grep -v "grep" | awk '{print $2}')
-        if: always()
+          /nix/var/nix/profiles/default/bin/nix --extra-experimental-features "nix-command" --extra-experimental-features "flakes"
+          develop --impure --command just
+          --justfile ./test/performance/justfile
+          --working-directory ./test/performance
+          graphs
       - uses: actions/upload-artifact@v4
         with:
           name: graphs

Review discussion on the removed kill step:

"If this part is gone and the benchmarks go wrong for some reason, the ledger will keep running and never be shut down."

"The ledger is run via the test, not externally."
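Since the benchmark suite now owns the ledger's lifecycle, the explicit kill cleanup step becomes unnecessary. A minimal sketch of the idea follows; this is hypothetical code, not the repository's actual test harness, and only the lifecycle handling is meant to be illustrative:

```go
// Hypothetical sketch: the benchmark starts and stops the server itself, so the
// workflow no longer needs a background "ledger serve" process or a kill step.
package performance_test

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// startLedger stands in for whatever helper the test suite uses to boot the
// ledger in-process; a real ledger HTTP handler would replace the empty mux.
func startLedger(tb testing.TB) *httptest.Server {
	tb.Helper()
	srv := httptest.NewServer(http.NewServeMux())
	tb.Cleanup(srv.Close) // runs even if the benchmark fails, so nothing is left behind
	return srv
}

func BenchmarkLedgerWrite(b *testing.B) {
	srv := startLedger(b)
	for i := 0; i < b.N; i++ {
		// each iteration would POST a transaction to srv.URL
		_ = srv.URL
	}
}
```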
Nix dev shell packages (@@ -101,6 +101,7 @@): goperf is added to the development environment:

 just
 nodejs_22
 self.packages.${system}.speakeasy
+goperf
 ];
 };
 }
This file was deleted.

New file (@@ -0,0 +1 @@):

*.js

This file was deleted.
test/performance/charts/index.ts (@@ -6,15 +6,15 @@ const main = async () => {): chart outputs are now written under ../report:

   let buffer = fs.readFileSync('../report/report.json', 'utf-8');
   let reports = JSON.parse(buffer);
   await exportTPSGraph({
-    output: 'tps.png',
+    output: '../report/tps.png',
   }, reports);

-  await exportDatabaseStats('database_connections.png', reports);
+  await exportDatabaseStats('../report/database_connections.png', reports);

   const ps: (keyof MetricsTime)[] = ['P99', 'P95', 'P75', 'Avg']
   for (let p of ps) {
     await exportLatencyGraph({
-      output: p.toLowerCase() + '.png'
+      output: '../report/' + p.toLowerCase() + '.png'
     }, p, reports);
   }
 }

Review comment (💡 Codebase verification): the code never checks that the '../report' directory exists before writing to it, so a directory creation step is needed to prevent runtime errors. Suggested change (also applies to lines 12 and 17):

 const main = async () => {
+  // Ensure report directory exists
+  if (!fs.existsSync('../report')) {
+    fs.mkdirSync('../report', { recursive: true });
+  }
   let buffer = fs.readFileSync('../report/report.json', 'utf-8');
This file was deleted.
Review discussion on the runner:

"Why only on github-001?"

"No specific reason, I just copy/pasted from the original workflow."

"I guess the comparison will be more accurate if the benchmarks are run on the same machine?"