diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml
index 91961ff..340e73b 100644
--- a/.github/workflows/ci-pr.yml
+++ b/.github/workflows/ci-pr.yml
@@ -20,12 +20,12 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ matrix.java }}
distribution: 'temurin'
@@ -36,7 +36,7 @@ jobs:
# Upload XMLs ONLY once (Java 21) so the report doesn't double-count
- name: Upload unit test XMLs (Java 21 only)
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
if: always() && matrix.java == '21'
with:
name: unit-xml
@@ -56,12 +56,12 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ matrix.java }}
distribution: 'temurin'
@@ -72,7 +72,7 @@ jobs:
# Upload XMLs ONLY once (Java 21) so the report doesn't double-count
- name: Upload IT test XMLs (Java 21 only)
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
if: always() && matrix.java == '21'
with:
name: it-xml
@@ -87,12 +87,12 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Set up JDK 21
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: '21'
distribution: 'temurin'
@@ -107,35 +107,6 @@ jobs:
- name: Run Checkstyle analysis
run: mvn -B checkstyle:check -Pqa -Ddependency-check.skip=true
- dependency-check:
- name: OWASP Dependency Check
- runs-on: ubuntu-latest
- needs: build
- env:
- NVD_API_KEY: ${{ secrets.NVD_API_KEY }}
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Set up JDK 21
- uses: actions/setup-java@v4
- with:
- java-version: '21'
- distribution: 'temurin'
- cache: 'maven'
-
- - name: Cache Dependency-Check DB
- uses: actions/cache@v4
- with:
- path: ~/.m2/repository/org/owasp/dependency-check-data
- key: depcheck-${{ runner.os }}-${{ hashFiles('**/pom.xml') }}
- restore-keys: |
- depcheck-${{ runner.os }}-
-
- - name: Run OWASP Dependency Check
- run: mvn -B dependency-check:aggregate -Pqa
-
reports:
name: Test Reports
runs-on: ubuntu-latest
@@ -149,26 +120,26 @@ jobs:
steps:
- name: Download unit XMLs (Java 21 only)
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v7
with:
name: unit-xml
path: reports/unit
- name: Download IT XMLs (Java 21 only)
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v7
with:
name: it-xml
path: reports/it
- name: Publish Unit Test Report
- uses: mikepenz/action-junit-report@v4
+ uses: mikepenz/action-junit-report@v6
if: always()
with:
report_paths: 'reports/unit/**/TEST-*.xml'
check_name: Unit Test Report
- name: Publish IT Test Report
- uses: mikepenz/action-junit-report@v4
+ uses: mikepenz/action-junit-report@v6
if: always()
with:
report_paths: 'reports/it/**/TEST-*.xml'
diff --git a/.github/workflows/ci-push.yml b/.github/workflows/ci-push.yml
index ddd4e2a..a55ab3e 100644
--- a/.github/workflows/ci-push.yml
+++ b/.github/workflows/ci-push.yml
@@ -19,12 +19,12 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ matrix.java }}
distribution: 'temurin'
@@ -34,7 +34,7 @@ jobs:
run: mvn -B clean verify -Pqa -Ddependency-check.skip=true
- name: Upload unit test XMLs
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
if: always()
with:
name: unit-xml-java-${{ matrix.java }}
@@ -55,14 +55,14 @@ jobs:
steps:
- name: Download unit XMLs (all Java versions)
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v7
with:
pattern: unit-xml-java-*
merge-multiple: true
path: reports/unit
- name: Publish Unit Test Report
- uses: mikepenz/action-junit-report@v4
+ uses: mikepenz/action-junit-report@v6
if: always()
with:
report_paths: 'reports/unit/**/TEST-*.xml'
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 5233757..da7d0dd 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -20,19 +20,19 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Set up JDK 21
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: '21'
distribution: 'temurin'
cache: 'maven'
- name: Initialize CodeQL
- uses: github/codeql-action/init@v3
+ uses: github/codeql-action/init@v4
with:
languages: java-kotlin
build-mode: manual
@@ -42,6 +42,6 @@ jobs:
run: mvn -B clean compile -DskipTests
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
+ uses: github/codeql-action/analyze@v4
with:
category: "/language:java-kotlin"
diff --git a/.github/workflows/dependabot-retarget-to-develop.yml b/.github/workflows/dependabot-retarget-to-develop.yml
index ef0d89d..d6df9d6 100644
--- a/.github/workflows/dependabot-retarget-to-develop.yml
+++ b/.github/workflows/dependabot-retarget-to-develop.yml
@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Change base branch to develop
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const pr = context.payload.pull_request;
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 1dbab60..67b6c0f 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -15,7 +15,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 1
diff --git a/.github/workflows/owasp-dependency-check.yml b/.github/workflows/owasp-dependency-check.yml
new file mode 100644
index 0000000..a821a57
--- /dev/null
+++ b/.github/workflows/owasp-dependency-check.yml
@@ -0,0 +1,48 @@
+name: OWASP Dependency Check
+
+on:
+ schedule:
+ # Runs every Sunday at 3:00 AM UTC
+ - cron: '0 3 * * 0'
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ security-events: write
+
+jobs:
+ dependency-check:
+ name: OWASP Dependency Check
+ runs-on: ubuntu-latest
+ env:
+ NVD_API_KEY: ${{ secrets.NVD_API_KEY }}
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v6
+
+ - name: Set up JDK 21
+ uses: actions/setup-java@v5
+ with:
+ java-version: '21'
+ distribution: 'temurin'
+ cache: 'maven'
+
+ - name: Cache Dependency-Check DB
+ uses: actions/cache@v5
+ with:
+ path: ~/.m2/repository/org/owasp/dependency-check-data
+ key: depcheck-${{ runner.os }}-${{ hashFiles('**/pom.xml') }}
+ restore-keys: |
+ depcheck-${{ runner.os }}-
+
+ - name: Run OWASP Dependency Check
+ run: mvn -B dependency-check:aggregate -Pqa
+
+ - name: Upload Dependency Check Report
+ uses: actions/upload-artifact@v6
+ if: always()
+ with:
+ name: dependency-check-report
+ path: target/dependency-check-report.html
+ retention-days: 30
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8867b33..2140a2a 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -65,7 +65,7 @@ jobs:
fi
- name: Set up JDK ${{ env.JAVA_VERSION }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ env.JAVA_VERSION }}
distribution: 'temurin'
@@ -85,10 +85,10 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ matrix.java }}
distribution: 'temurin'
@@ -105,10 +105,10 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: Set up JDK ${{ env.JAVA_VERSION }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
env:
CENTRAL_USERNAME: ${{ secrets.CENTRAL_USERNAME }}
CENTRAL_TOKEN: ${{ secrets.CENTRAL_TOKEN }}
@@ -137,10 +137,10 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: Set up JDK ${{ env.JAVA_VERSION }}
- uses: actions/setup-java@v4
+ uses: actions/setup-java@v5
with:
java-version: ${{ env.JAVA_VERSION }}
distribution: 'temurin'
@@ -150,7 +150,7 @@ jobs:
run: mvn -B cyclonedx:makeAggregateBom -Pqa
- name: Upload SBOM artifact
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
with:
name: sbom
path: target/bom.*
@@ -168,10 +168,10 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
- name: Download SBOM
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v7
with:
name: sbom
path: sbom/
diff --git a/.gitignore b/.gitignore
index 5232b33..dcc6e4a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,3 +50,6 @@ bin/
# Claude Code
/.claude/
/CLAUDE.md
+
+# GitHub
+current-ticket.md
\ No newline at end of file
diff --git a/aether-datafixers-api/pom.xml b/aether-datafixers-api/pom.xml
index 1322745..eb56948 100644
--- a/aether-datafixers-api/pom.xml
+++ b/aether-datafixers-api/pom.xml
@@ -6,7 +6,7 @@
de.splatgames.aether.datafixersaether-datafixers
- 0.5.0
+ 1.0.0-SNAPSHOTaether-datafixers-api
diff --git a/aether-datafixers-benchmarks/pom.xml b/aether-datafixers-benchmarks/pom.xml
new file mode 100644
index 0000000..ef8e7ba
--- /dev/null
+++ b/aether-datafixers-benchmarks/pom.xml
@@ -0,0 +1,174 @@
+
+
+ 4.0.0
+
+
+ de.splatgames.aether.datafixers
+ aether-datafixers
+ 1.0.0-SNAPSHOT
+
+
+ aether-datafixers-benchmarks
+ jar
+
+ Aether Datafixers :: Benchmarks
+ JMH microbenchmarks for Aether Datafixers performance analysis.
+
+
+
+ true
+ true
+
+ true
+
+ de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner
+
+
+
+
+
+ de.splatgames.aether.datafixers
+ aether-datafixers-api
+
+
+ de.splatgames.aether.datafixers
+ aether-datafixers-core
+
+
+ de.splatgames.aether.datafixers
+ aether-datafixers-codec
+
+
+ de.splatgames.aether.datafixers
+ aether-datafixers-testkit
+
+
+
+
+ org.openjdk.jmh
+ jmh-core
+
+
+ org.openjdk.jmh
+ jmh-generator-annprocess
+ provided
+
+
+
+
+ com.google.code.gson
+ gson
+
+
+ com.fasterxml.jackson.core
+ jackson-databind
+
+
+
+
+ org.yaml
+ snakeyaml
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-yaml
+
+
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-toml
+
+
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-xml
+
+
+
+
+ com.google.guava
+ guava
+
+
+
+
+ org.jetbrains
+ annotations
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+
+
+ org.openjdk.jmh
+ jmh-generator-annprocess
+ ${jmh.version}
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+ ${plugin.shade.version}
+
+
+ package
+
+ shade
+
+
+
+
+ org.openjdk.jmh.Main
+
+
+
+
+
+
+ *:*
+
+ META-INF/*.SF
+ META-INF/*.DSA
+ META-INF/*.RSA
+ META-INF/MANIFEST.MF
+
+
+
+ false
+ true
+ benchmarks
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 3.1.0
+
+ ${main.class}
+
+
+
+
+
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java
new file mode 100644
index 0000000..beb5766
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks;
+
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+import java.io.IOException;
+
+/**
+ * Main entry point for running Aether Datafixers JMH benchmarks.
+ *
+ *
This class provides both a command-line interface and programmatic API for
+ * executing benchmarks. It supports all standard JMH options while providing
+ * convenient preset configurations for common benchmark scenarios.
+ *
+ *
Execution Methods
+ *
+ *
Via Maven exec:java (Development)
+ *
Quick way to run benchmarks during development without building a JAR:
+ *
{@code
+ * # Run all benchmarks with default settings
+ * mvn exec:java -pl aether-datafixers-benchmarks
+ *
+ * # Run with JMH arguments
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="-h"
+ *
+ * # Run specific benchmark pattern
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
+ * }
+ *
+ *
Via Fat JAR (Production)
+ *
Recommended for production benchmark runs with full JMH isolation:
+ *
{@code
+ * # Build the fat JAR
+ * mvn clean package -pl aether-datafixers-benchmarks -DskipTests
+ *
+ * # Run all benchmarks
+ * java -jar aether-datafixers-benchmarks/target/aether-datafixers-benchmarks-*-benchmarks.jar
+ *
+ * # Run specific benchmark
+ * java -jar target/*-benchmarks.jar SingleFixBenchmark
+ *
+ * # Run with custom parameters
+ * java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 3 -i 5 -f 1
+ *
+ * # Output JSON results for analysis
+ * java -jar target/*-benchmarks.jar -rf json -rff results.json
+ *
+ * # List all available benchmarks
+ * java -jar target/*-benchmarks.jar -l
+ *
+ * # Profile with async-profiler
+ * java -jar target/*-benchmarks.jar -prof async:output=flamegraph
+ * }
For integration with test frameworks or custom tooling:
+ *
{@code
+ * // Run all benchmarks
+ * BenchmarkRunner.runAllBenchmarks();
+ *
+ * // Run quick validation (CI/CD)
+ * BenchmarkRunner.runQuickBenchmarks();
+ *
+ * // Run only core benchmarks
+ * BenchmarkRunner.runCoreBenchmarks();
+ *
+ * // Run only format benchmarks
+ * BenchmarkRunner.runFormatBenchmarks();
+ * }
+ *
+ *
Default Configuration
+ *
+ *
Setting
Default
Quick Mode
+ *
Warmup iterations
5
2
+ *
Measurement iterations
10
3
+ *
Forks
2
1
+ *
JVM heap
2 GB
1 GB
+ *
+ *
+ *
Common JMH Options
+ *
+ *
Option
Description
Example
+ *
{@code -wi}
Warmup iterations
{@code -wi 3}
+ *
{@code -i}
Measurement iterations
{@code -i 5}
+ *
{@code -f}
Number of forks
{@code -f 1}
+ *
{@code -p}
Parameter value
{@code -p payloadSize=SMALL}
+ *
{@code -t}
Thread count
{@code -t 4}
+ *
{@code -rf}
Result format
{@code -rf json}
+ *
{@code -rff}
Result file
{@code -rff results.json}
+ *
{@code -l}
List benchmarks
{@code -l}
+ *
{@code -prof}
Profiler
{@code -prof gc}
+ *
+ *
+ * @author Erik Pförtner
+ * @see de.splatgames.aether.datafixers.benchmarks.core
+ * @see de.splatgames.aether.datafixers.benchmarks.codec
+ * @see de.splatgames.aether.datafixers.benchmarks.concurrent
+ * @since 1.0.0
+ */
+public final class BenchmarkRunner {
+
+ /**
+ * Private constructor to prevent instantiation.
+ */
+ private BenchmarkRunner() {
+ // Main class
+ }
+
+ /**
+ * Main entry point for running benchmarks from the command line.
+ *
+ *
Behavior depends on whether arguments are provided:
+ *
+ *
With arguments: Delegates to JMH's main method, supporting all
+ * standard JMH command-line options
+ *
Without arguments: Runs all benchmarks using default configuration
+ * via {@link #runAllBenchmarks()}
+ *
+ *
+ *
Exit Codes
+ *
+ *
0 - Successful completion
+ *
Non-zero - Error during benchmark execution
+ *
+ *
+ * @param args command-line arguments (passed directly to JMH if present)
+ * @throws RunnerException if benchmark execution fails
+ * @throws IOException if there is an I/O error reading benchmark metadata
+ */
+ public static void main(final String[] args) throws RunnerException, IOException {
+ if (args.length > 0) {
+ // If arguments are provided, delegate to JMH main
+ org.openjdk.jmh.Main.main(args);
+ } else {
+ // Run with default options
+ runAllBenchmarks();
+ }
+ }
+
+ /**
+ * Runs all benchmarks in the benchmarks package with default configuration.
+ *
+ *
Executes every benchmark class in
+ * {@code de.splatgames.aether.datafixers.benchmarks.*} with production-quality
+ * settings suitable for reliable performance measurements.
+ *
+ *
Configuration
+ *
+ *
Warmup: 5 iterations
+ *
Measurement: 10 iterations
+ *
Forks: 2 (for JIT variance mitigation)
+ *
JVM heap: 2 GB min/max
+ *
+ *
+ *
Note: Running all benchmarks can take significant time depending
+ * on the number of parameter combinations. Consider using
+ * {@link #runQuickBenchmarks()} for validation or {@link #runCoreBenchmarks()}
+ * for focused testing.
+ *
+ * @throws RunnerException if benchmark execution fails
+ * @see #runQuickBenchmarks()
+ * @see #runCoreBenchmarks()
+ */
+ public static void runAllBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs a quick subset of benchmarks for fast validation.
+ *
+ *
Executes only the {@code SingleFixBenchmark} with minimal iterations,
+ * suitable for:
+ *
+ *
CI/CD pipeline smoke tests
+ *
Quick sanity checks during development
+ *
Verifying benchmark infrastructure works correctly
+ *
+ *
+ *
Configuration
+ *
+ *
Benchmark: SingleFixBenchmark only
+ *
Warmup: 2 iterations
+ *
Measurement: 3 iterations
+ *
Forks: 1 (faster but less statistically robust)
+ *
JVM heap: 1 GB min/max
+ *
Payload size: SMALL only
+ *
+ *
+ *
Warning: Results from quick benchmarks should not be used for
+ * performance comparisons due to reduced statistical rigor.
+ *
+ * @throws RunnerException if benchmark execution fails
+ * @see #runAllBenchmarks()
+ */
+ public static void runQuickBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\.SingleFixBenchmark")
+ .warmupIterations(2)
+ .measurementIterations(3)
+ .forks(1)
+ .jvmArgs("-Xms1G", "-Xmx1G")
+ .param("payloadSize", "SMALL")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs only the core migration benchmarks.
+ *
+ *
Executes benchmarks in the {@code core} package that measure DataFixer
+ * migration performance:
+ *
+ *
{@code SingleFixBenchmark} - Single fix application performance
Use this method when focusing on migration performance without
+ * format-specific or codec overhead considerations.
+ *
+ * @throws RunnerException if benchmark execution fails
+ * @see #runFormatBenchmarks()
+ * @see #runAllBenchmarks()
+ */
+ public static void runCoreBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs only the format comparison benchmarks.
+ *
+ *
Executes benchmarks in the {@code format} package that compare different
+ * DynamicOps implementations:
+ *
+ *
{@code JsonBenchmark} - GsonOps vs JacksonJsonOps
+ *
{@code YamlBenchmark} - SnakeYamlOps vs JacksonYamlOps
+ *
{@code TomlXmlBenchmark} - JacksonTomlOps and JacksonXmlOps
+ *
{@code CrossFormatBenchmark} - Format conversion performance
+ *
+ *
+ *
Configuration
+ *
+ *
Warmup: 5 iterations
+ *
Measurement: 10 iterations
+ *
Forks: 2
+ *
JVM heap: 2 GB min/max
+ *
+ *
+ *
Use this method when evaluating which DynamicOps implementation
+ * to use for a specific use case, or when optimizing format handling.
+ *
+ * @throws RunnerException if benchmark execution fails
+ * @see #runCoreBenchmarks()
+ * @see #runAllBenchmarks()
+ */
+ public static void runFormatBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.format\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
new file mode 100644
index 0000000..56405aa
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.codec;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.codec.Codec;
+import de.splatgames.aether.datafixers.api.codec.Codecs;
+import de.splatgames.aether.datafixers.api.result.DataResult;
+import de.splatgames.aether.datafixers.api.util.Pair;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for collection codec encode/decode performance.
+ *
+ *
Measures the performance of list codec operations with parameterized collection
+ * sizes. These benchmarks reveal how codec performance scales with data volume and
+ * help identify potential bottlenecks in collection traversal and element processing.
+ *
+ *
Benchmark Categories
+ *
+ *
String List Benchmarks
+ *
Measure {@code List} codec operations:
+ *
+ *
{@link #encodeStringList} - Encode string list to JSON array
+ *
{@link #decodeStringList} - Decode JSON array to string list
+ *
{@link #roundTripStringListDirect} - Complete round-trip with direct extraction
+ *
{@link #roundTripStringListFunctional} - Complete round-trip using functional API
+ *
+ *
+ *
Integer List Benchmarks
+ *
Measure {@code List} codec operations:
+ *
+ *
{@link #encodeIntList} - Encode integer list to JSON array
+ *
{@link #decodeIntList} - Decode JSON array to integer list
+ *
{@link #roundTripIntListDirect} - Complete round-trip with direct extraction
+ *
{@link #roundTripIntListFunctional} - Complete round-trip using functional API
+ *
+ *
+ *
Parameters
+ *
+ *
Parameter
Values
Description
+ *
listSize
10, 100, 1000
Number of elements in the test list
+ *
+ *
+ *
Benchmark Configuration
+ *
+ *
Setting
Value
+ *
Warmup
5 iterations, 1 second each
+ *
Measurement
10 iterations, 1 second each
+ *
Forks
2 (for JIT variance mitigation)
+ *
JVM Heap
2 GB min/max
+ *
Time Unit
Microseconds (appropriate for collection operations)
+ *
+ *
+ *
Test Data Generation
+ *
+ *
Collection
Element Pattern
Example (size=3)
+ *
String List
{@code "item-" + index}
["item-0", "item-1", "item-2"]
+ *
Integer List
{@code index}
[0, 1, 2]
+ *
+ *
+ *
Interpreting Results
+ *
+ *
Linear scaling: Expected behavior where time scales proportionally with list size.
+ * If 100 elements takes 10x longer than 10 elements, scaling is linear.
+ *
Sub-linear scaling: Better than expected, may indicate JIT optimizations
+ * or efficient batch processing.
+ *
Super-linear scaling: Performance degrades faster than list size grows.
+ * May indicate memory pressure, GC overhead, or algorithmic inefficiency.
+ *
String vs Integer: String lists typically have higher overhead due to
+ * object allocation and potential string interning effects.
+ *
Direct vs Functional: Functional API (using {@code flatMap}) may show
+ * slight overhead from lambda creation and DataResult chaining.
+ *
+ *
+ *
Usage
+ *
{@code
+ * # Run all collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run with specific list size
+ * java -jar benchmarks.jar CollectionCodecBenchmark -p listSize=1000
+ *
+ * # Run only string list benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.*String.*"
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.encode.*"
+ *
+ * # Compare direct vs functional round-trip
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.roundTrip.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar CollectionCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar CollectionCodecBenchmark -rf json -rff collection_results.json
+ * }
+ *
+ * @author Erik Pförtner
+ * @see PrimitiveCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs#list(Codec)
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class CollectionCodecBenchmark {
+
+ /**
+ * The number of elements in test lists, injected by JMH.
+ *
+ *
This parameter controls the size of both string and integer lists.
+ * Different sizes reveal scaling characteristics of the list codec:
+ *
+ *
10: Small list baseline, minimal memory/GC impact
+ *
100: Medium list, typical real-world collection size
+ *
1000: Large list stress test, reveals scaling behavior
+ *
+ */
+ @Param({"10", "100", "1000"})
+ private int listSize;
+
+ /**
+ * The DynamicOps implementation used for all codec operations.
+ *
+ *
GsonOps is used as the reference JSON implementation for benchmarks.
Measures the performance of converting a {@code List} to a JSON
+ * array element. Each string element is individually encoded and added to the
+ * resulting array.
+ *
+ *
Performance factors:
+ *
+ *
List iteration overhead
+ *
Per-element string encoding cost
+ *
JSON array construction and element addition
+ *
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeStringList(final Blackhole blackhole) {
+ final DataResult result = this.stringListCodec.encodeStart(this.ops, this.stringList);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks string list decoding from JSON array.
+ *
+ *
Measures the performance of extracting a {@code List} from a
+ * pre-encoded JSON array. Each array element is decoded to a string and
+ * collected into the result list.
+ *
+ *
Performance factors:
+ *
+ *
JSON array traversal
+ *
Per-element string extraction
+ *
Result list construction and population
+ *
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeStringList(final Blackhole blackhole) {
+ final DataResult, JsonElement>> result = this.stringListCodec.decode(this.ops, this.encodedStringList);
+ blackhole.consume(result);
+ }
+
+ // ==================== Integer List Benchmarks ====================
+
+ /**
+ * Benchmarks integer list encoding to JSON array.
+ *
+ *
Measures the performance of converting a {@code List} to a JSON
+ * array element. Integer encoding is typically faster than string encoding
+ * due to simpler value representation.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeIntList(final Blackhole blackhole) {
+ final DataResult result = this.intListCodec.encodeStart(this.ops, this.intList);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks integer list decoding from JSON array.
+ *
+ *
Measures the performance of extracting a {@code List} from a
+ * pre-encoded JSON array. Integer decoding involves numeric parsing from
+ * JSON number elements.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeIntList(final Blackhole blackhole) {
+ final DataResult, JsonElement>> result = this.intListCodec.decode(this.ops, this.encodedIntList);
+ blackhole.consume(result);
+ }
+
+ // ==================== Round-Trip Benchmarks (Direct Style) ====================
+
+ /**
+ * Benchmarks complete string list round-trip with direct result extraction.
+ *
+ *
Measures the combined performance of encoding a {@code List} to JSON
+ * and immediately decoding it back. Uses {@code result().orElseThrow()} for
+ * direct value extraction, representing typical imperative usage patterns.
+ *
+ *
This benchmark is useful for scenarios where data is temporarily serialized
+ * (e.g., caching, message passing) and immediately deserialized.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripStringListDirect(final Blackhole blackhole) {
+ final JsonElement json = this.stringListCodec.encodeStart(this.ops, this.stringList)
+ .result().orElseThrow();
+ final Pair, JsonElement> decoded = this.stringListCodec.decode(this.ops, json)
+ .result().orElseThrow();
+ blackhole.consume(decoded);
+ }
+
+ /**
+ * Benchmarks complete integer list round-trip with direct result extraction.
+ *
+ *
Measures the combined performance of encoding a {@code List} to JSON
+ * and immediately decoding it back using direct value extraction.
Measures the combined performance of encoding and decoding using
+ * {@link DataResult#flatMap} for monadic composition. This represents
+ * the functional programming style where operations are chained without
+ * explicit result unwrapping.
+ *
+ *
Comparing with {@link #roundTripStringListDirect} reveals the overhead
+ * (if any) of the functional API approach versus direct extraction.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripStringListFunctional(final Blackhole blackhole) {
+ final DataResult encoded = this.stringListCodec.encodeStart(this.ops, this.stringList);
+ final DataResult, JsonElement>> decoded = encoded.flatMap(
+ json -> this.stringListCodec.decode(this.ops, json)
+ );
+ blackhole.consume(decoded);
+ }
+
+ /**
+ * Benchmarks complete integer list round-trip using functional API.
+ *
+ *
Measures the combined performance of encoding and decoding using
+ * monadic composition via {@link DataResult#flatMap}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripIntListFunctional(final Blackhole blackhole) {
+ final DataResult encoded = this.intListCodec.encodeStart(this.ops, this.intList);
+ final DataResult, JsonElement>> decoded = encoded.flatMap(
+ json -> this.intListCodec.decode(this.ops, json)
+ );
+ blackhole.consume(decoded);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
new file mode 100644
index 0000000..7e9c8da
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.codec;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.codec.Codecs;
+import de.splatgames.aether.datafixers.api.result.DataResult;
+import de.splatgames.aether.datafixers.api.util.Pair;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for primitive type codec encode/decode performance.
+ *
+ *
+ * <p>Measures the baseline performance of the fundamental codec operations for
+ * primitive Java types. These benchmarks establish the lower bound for codec
+ * performance and help identify overhead introduced by more complex codec
+ * compositions.
+ *
+ *
Benchmark Categories
+ *
+ *
Encode Benchmarks
+ *
Measure Java value to JSON element conversion:
+ *
+ *
{@link #encodeBool} - Boolean encoding
+ *
{@link #encodeInt} - Integer encoding
+ *
{@link #encodeLong} - Long encoding
+ *
{@link #encodeFloat} - Float encoding
+ *
{@link #encodeDouble} - Double encoding
+ *
{@link #encodeString} - String encoding
+ *
+ *
+ *
Decode Benchmarks
+ *
Measure JSON element to Java value conversion:
+ *
+ *
{@link #decodeBool} - Boolean decoding
+ *
{@link #decodeInt} - Integer decoding
+ *
{@link #decodeLong} - Long decoding
+ *
{@link #decodeFloat} - Float decoding
+ *
{@link #decodeDouble} - Double decoding
+ *
{@link #decodeString} - String decoding
+ *
+ *
+ *
Round-Trip Benchmarks
+ *
Measure complete encode-then-decode cycles:
+ *
+ *
{@link #roundTripIntDirect} - Integer round-trip with direct result extraction
+ *
{@link #roundTripStringDirect} - String round-trip with direct result extraction
+ *
+ *
+ *
Benchmark Configuration
+ *
+ *
Setting
Value
+ *
Warmup
5 iterations, 1 second each
+ *
Measurement
10 iterations, 1 second each
+ *
Forks
2 (for JIT variance mitigation)
+ *
JVM Heap
2 GB min/max
+ *
Time Unit
Nanoseconds (for fine-grained primitive ops)
+ *
+ *
+ *
Test Values
+ *
+ *
Type
Value
Notes
+ *
boolean
{@code true}
Single bit representation
+ *
int
{@code 42}
Small positive integer
+ *
long
{@code 123456789L}
Value exceeding int range representation
+ *
float
{@code 3.14159f}
Pi approximation (tests decimal handling)
+ *
double
{@code 2.718281828}
Euler's number (tests precision)
+ *
String
{@code "benchmark-test-string"}
21-character ASCII string
+ *
+ *
+ *
Interpreting Results
+ *
+ *
Encode vs Decode: Encoding typically allocates new JSON elements; decoding
+ * extracts values from existing elements. Similar performance is expected.
+ *
Numeric types: All numeric types should have similar performance as they
+ * map directly to JSON number primitives.
+ *
String codec: May show slightly different characteristics due to string
+ * interning and character encoding considerations.
+ *
Round-trip overhead: Should be approximately encode + decode time plus
+ * minimal DataResult unwrapping overhead.
+ *
+ * @author Erik Pförtner
+ * @see CollectionCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class PrimitiveCodecBenchmark {
+
+ /**
+ * Test boolean value for encoding benchmarks.
+ */
+ private static final boolean TEST_BOOL = true;
+
+ /**
+ * Test integer value for encoding benchmarks.
+ *
+ *
+ * <p>A small positive integer that fits in a single JSON number token.
+ */
+ private static final int TEST_INT = 42;
+
+ /**
+ * Test long value for encoding benchmarks.
+ *
+ *
+ * <p>A value that exceeds typical int range to test long-specific handling.
+ */
+ private static final long TEST_LONG = 123456789L;
+
+ /**
+ * Test float value for encoding benchmarks.
+ *
+ *
+ * <p>Pi approximation to test decimal point handling and precision.
+ */
+ private static final float TEST_FLOAT = 3.14159f;
+
+ /**
+ * Test double value for encoding benchmarks.
+ *
+ *
+ * <p>Euler's number with extended precision to test double encoding accuracy.
+ */
+ private static final double TEST_DOUBLE = 2.718281828;
+
+ /**
+ * Test string value for encoding benchmarks.
+ *
+ *
+ * <p>A 21-character ASCII string representing typical field values.
+ */
+ private static final String TEST_STRING = "benchmark-test-string";
+
+ /**
+ * The DynamicOps implementation used for all codec operations.
+ *
+ *
+ * <p>GsonOps is used as the reference implementation for JSON format benchmarks.
+ */
+ private GsonOps ops;
+
+ /**
+ * Pre-encoded boolean JSON element for decode benchmarks.
+ */
+ private JsonElement encodedBool;
+
+ /**
+ * Pre-encoded integer JSON element for decode benchmarks.
+ */
+ private JsonElement encodedInt;
+
+ /**
+ * Pre-encoded long JSON element for decode benchmarks.
+ */
+ private JsonElement encodedLong;
+
+ /**
+ * Pre-encoded float JSON element for decode benchmarks.
+ */
+ private JsonElement encodedFloat;
+
+ /**
+ * Pre-encoded double JSON element for decode benchmarks.
+ */
+ private JsonElement encodedDouble;
+
+ /**
+ * Pre-encoded string JSON element for decode benchmarks.
+ */
+ private JsonElement encodedString;
+
+ /**
+ * Initializes pre-encoded JSON elements for decode benchmarks.
+ *
+ *
+ * <p>Pre-encoding ensures decode benchmarks measure only decoding performance
+ * without encoding overhead. All test values are encoded once at trial start.
Measures the performance of converting a Java {@code boolean} to a
+ * JSON boolean element via {@link Codecs#BOOL}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeBool(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks boolean value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code Boolean} from a
+ * pre-encoded JSON boolean element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeBool(final Blackhole blackhole) {
+ final DataResult<Pair<Boolean, JsonElement>> result = Codecs.BOOL.decode(this.ops, this.encodedBool);
+ blackhole.consume(result);
+ }
+
+ // ==================== Integer Benchmarks ====================
+
+ /**
+ * Benchmarks integer value encoding to JSON.
+ *
+ *
+ * <p>Measures the performance of converting a Java {@code int} to a
+ * JSON number element via {@link Codecs#INT}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeInt(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.INT.encodeStart(this.ops, TEST_INT);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks integer value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code Integer} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeInt(final Blackhole blackhole) {
+ final DataResult<Pair<Integer, JsonElement>> result = Codecs.INT.decode(this.ops, this.encodedInt);
+ blackhole.consume(result);
+ }
+
+ // ==================== Long Benchmarks ====================
+
+ /**
+ * Benchmarks long value encoding to JSON.
+ *
+ *
+ * <p>Measures the performance of converting a Java {@code long} to a
+ * JSON number element via {@link Codecs#LONG}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeLong(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.LONG.encodeStart(this.ops, TEST_LONG);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks long value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code Long} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeLong(final Blackhole blackhole) {
+ final DataResult<Pair<Long, JsonElement>> result = Codecs.LONG.decode(this.ops, this.encodedLong);
+ blackhole.consume(result);
+ }
+
+ // ==================== Float Benchmarks ====================
+
+ /**
+ * Benchmarks float value encoding to JSON.
+ *
+ *
+ * <p>Measures the performance of converting a Java {@code float} to a
+ * JSON number element via {@link Codecs#FLOAT}. Float encoding involves
+ * decimal representation which may differ from integer encoding.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeFloat(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks float value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code Float} from a
+ * pre-encoded JSON number element. Decoding involves parsing the decimal
+ * representation back to IEEE 754 single-precision format.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeFloat(final Blackhole blackhole) {
+ final DataResult<Pair<Float, JsonElement>> result = Codecs.FLOAT.decode(this.ops, this.encodedFloat);
+ blackhole.consume(result);
+ }
+
+ // ==================== Double Benchmarks ====================
+
+ /**
+ * Benchmarks double value encoding to JSON.
+ *
+ *
+ * <p>Measures the performance of converting a Java {@code double} to a
+ * JSON number element via {@link Codecs#DOUBLE}. Double encoding preserves
+ * higher precision than float but uses similar mechanisms.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeDouble(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks double value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code Double} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeDouble(final Blackhole blackhole) {
+ final DataResult<Pair<Double, JsonElement>> result = Codecs.DOUBLE.decode(this.ops, this.encodedDouble);
+ blackhole.consume(result);
+ }
+
+ // ==================== String Benchmarks ====================
+
+ /**
+ * Benchmarks string value encoding to JSON.
+ *
+ *
+ * <p>Measures the performance of converting a Java {@code String} to a
+ * JSON string element via {@link Codecs#STRING}. String encoding may involve
+ * escape sequence handling for special characters.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeString(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.STRING.encodeStart(this.ops, TEST_STRING);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks string value decoding from JSON.
+ *
+ *
+ * <p>Measures the performance of extracting a Java {@code String} from a
+ * pre-encoded JSON string element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeString(final Blackhole blackhole) {
+ final DataResult<Pair<String, JsonElement>> result = Codecs.STRING.decode(this.ops, this.encodedString);
+ blackhole.consume(result);
+ }
+
+ // ==================== Round-Trip Benchmarks ====================
+
+ /**
+ * Benchmarks complete integer round-trip (encode then decode).
+ *
+ *
+ * <p>Measures the combined performance of encoding a Java {@code int} to JSON
+ * and immediately decoding it back. Uses direct result extraction via
+ * {@code result().orElseThrow()} to measure the typical non-functional usage pattern.
+ *
+ *
+ * <p>Round-trip performance is important for scenarios where data is temporarily
+ * serialized (e.g., caching, IPC) and immediately deserialized.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripIntDirect(final Blackhole blackhole) {
+ final JsonElement json = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow();
+ final Pair<Integer, JsonElement> decoded = Codecs.INT.decode(this.ops, json).result().orElseThrow();
+ blackhole.consume(decoded);
+ }
+
+ /**
+ * Benchmarks complete string round-trip (encode then decode).
+ *
+ *
+ * <p>Measures the combined performance of encoding a Java {@code String} to JSON
+ * and immediately decoding it back. String round-trips may involve additional
+ * overhead from string object creation compared to primitive numeric types.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripStringDirect(final Blackhole blackhole) {
+ final JsonElement json = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow();
+ final Pair<String, JsonElement> decoded = Codecs.STRING.decode(this.ops, json).result().orElseThrow();
+ blackhole.consume(decoded);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java
new file mode 100644
index 0000000..5720cfc
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Codec-focused JMH benchmarks for the Aether DataFixers framework.
+ *
+ *
+ * <p>This package contains benchmarks that measure the performance of codec operations,
+ * including encoding (Java objects to serialized format) and decoding (serialized format
+ * to Java objects). These benchmarks establish baseline performance for the codec system
+ * and help identify bottlenecks in serialization pipelines.
Scaling with collection size, functional vs direct API overhead
+ *
+ *
+ *
+ *
Why Codec Benchmarks?
+ *
+ * <p>Codecs are fundamental to the DataFixer system, transforming data between typed
+ * Java objects and format-agnostic {@link de.splatgames.aether.datafixers.api.dynamic.Dynamic}
+ * representations. Understanding codec performance is essential for:
+ *
+ *
Baseline establishment: Primitive codecs set the lower bound for all
+ * codec operations; complex codecs compose these primitives
+ *
Bottleneck identification: Comparing encode vs decode reveals which
+ * direction is more expensive for a given type
+ *
Scaling analysis: Collection benchmarks show how performance changes
+ * with data volume
+ *
API comparison: Direct extraction vs functional composition may have
+ * different performance characteristics
{@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} -
+ * Codec thread-safety is assumed; concurrent benchmarks validate this assumption
+ *
+ *
+ *
Supported Serialization Formats
+ *
+ * <p>These benchmarks use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
+ * as the reference DynamicOps implementation. The codec system supports multiple formats:
+ *
+ *
JSON: GsonOps, JacksonJsonOps
+ *
YAML: SnakeYamlOps, JacksonYamlOps
+ *
TOML: JacksonTomlOps
+ *
XML: JacksonXmlOps
+ *
+ *
+ * <p>Future benchmarks may compare performance across different DynamicOps implementations.
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codec
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.codec;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
new file mode 100644
index 0000000..a1830bf
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -0,0 +1,601 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.concurrent;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.api.schema.Schema;
+import de.splatgames.aether.datafixers.api.schema.SchemaRegistry;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry;
+import de.splatgames.aether.datafixers.testkit.factory.MockSchemas;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.SplittableRandom;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for concurrent DataFixer operations and thread-safety validation.
+ *
+ *
+ * <p>This benchmark measures the performance characteristics of the DataFixer system
+ * under concurrent load. It validates thread-safety of shared components and quantifies
+ * scalability across different thread counts. The results help identify contention
+ * points and ensure the framework performs well in multi-threaded environments.
+ *
+ *
Benchmark Categories
+ *
+ *
Concurrent Migration Benchmarks
+ *
Measure DataFixer performance when multiple threads perform migrations simultaneously:
+ *
+ *
{@link #concurrentSingleFix} - Maximum parallelism with single-fix migrations
+ *
{@link #concurrentChainMigration} - Maximum parallelism with 10-fix chain migrations
+ *
{@link #fourThreadMigration} - Fixed 4-thread migration for baseline comparison
+ *
{@link #eightThreadMigration} - Fixed 8-thread migration for scaling analysis
+ *
+ *
+ *
Concurrent Registry Access Benchmarks
+ *
Measure SchemaRegistry performance under concurrent read pressure:
+ *
+ *
{@link #concurrentRegistryLookup} - Random version lookups from multiple threads
All available CPU threads simultaneously apply a single DataFix to their
+ * respective input data. This benchmark stress-tests the thread-safety of the
+ * DataFixer implementation and measures maximum achievable throughput.
+ *
+ *
Key aspects measured:
+ *
+ *
Lock contention in shared DataFixer instance
+ *
Memory allocation pressure under concurrent load
+ *
Cache coherency effects from shared schema access
+ *
+ *
+ * @param s shared benchmark state containing the DataFixer and versions
+ * @param t per-thread state containing isolated input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentSingleFix(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic<JsonElement> result = s.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks concurrent chain migrations with maximum thread parallelism.
+ *
+ *
+ * <p>All available CPU threads simultaneously apply a 10-fix chain migration.
+ * This benchmark combines the stress of concurrent access with the complexity
+ * of multi-step migrations, revealing performance characteristics under
+ * realistic high-load scenarios.
+ *
+ *
Compared to {@link #concurrentSingleFix}, this benchmark:
Exercises fix ordering and version traversal logic concurrently
+ *
Creates higher memory allocation rates per thread
+ *
+ *
+ * @param s shared benchmark state containing the chain DataFixer
+ * @param t per-thread state containing isolated input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentChainMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic<JsonElement> result = s.sharedChainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ t.threadInput,
+ s.fromVersion,
+ s.chainToVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks migration performance with exactly 4 concurrent threads.
+ *
+ *
+ * <p>Provides a fixed-thread baseline for comparing against variable-thread
+ * benchmarks. Four threads represent a typical server core count and help
+ * establish scaling characteristics between single-threaded and maximum
+ * parallelism scenarios.
+ *
+ *
Use this benchmark to:
+ *
+ *
Establish baseline concurrent performance on quad-core systems
+ *
Compare with {@link #eightThreadMigration} to measure scaling factor
+ *
Identify the point where adding threads provides diminishing returns
+ *
+ *
+ * @param s shared benchmark state containing the DataFixer
+ * @param t per-thread state containing isolated input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(4)
+ public void fourThreadMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic<JsonElement> result = s.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks migration performance with exactly 8 concurrent threads.
+ *
+ *
+ * <p>Tests scaling beyond the 4-thread baseline. Eight threads represent
+ * a common server configuration and help identify whether the DataFixer
+ * implementation scales efficiently with additional parallelism.
+ *
+ *
Scaling analysis:
+ *
+ *
2x throughput vs 4 threads: Perfect linear scaling
+ *
1.5-2x throughput: Good scaling with minor contention
+ *
<1.5x throughput: Contention limiting scalability
+ *
≤1x throughput: Severe contention; investigate locking
+ *
+ *
+ * @param s shared benchmark state containing the DataFixer
+ * @param t per-thread state containing isolated input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(8)
+ public void eightThreadMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic<JsonElement> result = s.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ // ==================== Concurrent Registry Access Benchmarks ====================
+
+ /**
+ * Benchmarks concurrent random schema lookups from the registry.
+ *
+ *
+ * <p>All available threads perform random version lookups against a shared
+ * {@link SchemaRegistry} containing 100 schema versions. This benchmark
+ * validates the thread-safety and performance of registry read operations
+ * under heavy concurrent access.
+ *
+ *
The benchmark uses pre-computed random indices (via {@link ThreadState#nextRegistryIndex()})
+ * to avoid RNG contention affecting measurements. Each thread cycles through
+ * a 1024-element buffer of random indices.
+ *
+ *
Performance expectations:
+ *
+ *
Registry lookups should be lock-free and scale linearly
+ *
Cache effects may cause variance based on version access patterns
+ *
No write contention since registry is frozen before benchmarking
+ *
+ *
+ * @param s shared benchmark state containing the registry and versions
+ * @param t per-thread state providing random index sequence
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentRegistryLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextRegistryIndex();
+ final Schema schema = s.sharedRegistry.get(s.registryVersions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks concurrent latest-schema lookups from the registry.
+ *
+ *
+ * <p>All available threads repeatedly call {@link SchemaRegistry#latest()}
+ * on a shared registry. This represents the "hot path" optimization where
+ * applications frequently need the most recent schema version.
+ *
+ *
This benchmark helps validate:
+ *
+ *
Caching effectiveness for the latest schema reference
+ *
Memory visibility of the cached latest schema across threads
+ *
Absence of unnecessary synchronization on read-only access
+ *
+ *
+ *
Expected to outperform {@link #concurrentRegistryLookup} due to:
+ *
+ *
No version-to-schema map lookup required
+ *
Single cached reference rather than computed lookup
+ *
Better CPU cache utilization from accessing same memory location
+ *
+ *
+ * @param s shared benchmark state containing the registry
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentLatestLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ final Schema schema = s.sharedRegistry.latest();
+ blackhole.consume(schema);
+ }
+
+ // ==================== State Classes ====================
+
+ /**
+ * Shared benchmark state accessible by all threads.
+ *
+ *
+ * <p>This state class contains all resources that are shared across benchmark
+ * threads, simulating real-world scenarios where a single DataFixer instance
+ * serves multiple concurrent requests.
+ *
+ *
State initialization occurs once per trial (before warmup begins) to
+ * ensure consistent starting conditions across all measurement iterations.
+ *
+ *
Shared Resources
+ *
+ *
{@link #sharedFixer} - Single-fix DataFixer for basic migration benchmarks
+ *
{@link #sharedChainFixer} - 10-fix chain DataFixer for chain migration benchmarks
+ *
{@link #sharedRegistry} - Frozen SchemaRegistry with 100 versions for lookup benchmarks
+ *
Version constants - Pre-computed DataVersion instances to avoid allocation during measurement
+ *
+ */
+ @State(Scope.Benchmark)
+ public static class BenchmarkState {
+
+ /**
+ * The payload size parameter, injected by JMH.
+ *
+ *
Controls the complexity of generated test data for each thread.
+ * Only SMALL and MEDIUM sizes are used to balance benchmark runtime
+ * with meaningful performance differentiation.
+ *
+ * @see PayloadSize
+ */
+ @Param({"SMALL", "MEDIUM"})
+ public PayloadSize payloadSize;
+
+ /**
+ * Shared DataFixer configured with a single fix (v1 → v2).
+ *
+ *
Used by migration benchmarks that measure basic concurrent
+ * fix application without chain traversal overhead.
+ */
+ public DataFixer sharedFixer;
+
+ /**
+ * Shared DataFixer configured with a 10-fix chain (v1 → v11).
+ *
+ *
Used by {@link #concurrentChainMigration} to measure concurrent
+ * performance when applying multiple sequential fixes.
The registry is frozen after population to ensure thread-safe
+ * read access during benchmarks. Versions range from 10 to 1000
+ * in increments of 10.
+ */
+ public SchemaRegistry sharedRegistry;
+
+ /**
+ * Source version for all migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for single-fix migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Target version for chain migrations (v11).
+ */
+ public DataVersion chainToVersion;
+
+ /**
+ * Pre-computed DataVersion array for registry lookup benchmarks.
+ *
+ *
Contains 100 versions (10, 20, 30, ..., 1000) matching the
+ * schemas registered in {@link #sharedRegistry}. Pre-allocation
+ * avoids DataVersion object creation during measurement.
+ */
+ public DataVersion[] registryVersions;
+
+ /**
+ * Initializes all shared benchmark state.
+ *
+ *
Creates DataFixer instances, populates the SchemaRegistry with
+ * 100 versions, and pre-computes all version constants. The registry
+ * is frozen after population to enable lock-free concurrent reads.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer();
+ this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10);
+
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(2);
+ this.chainToVersion = new DataVersion(11);
+
+ final SimpleSchemaRegistry registry = new SimpleSchemaRegistry();
+ this.registryVersions = new DataVersion[100];
+ for (int i = 0; i < 100; i++) {
+ final int version = (i + 1) * 10;
+ this.registryVersions[i] = new DataVersion(version);
+ registry.register(MockSchemas.minimal(version));
+ }
+ registry.freeze();
+ this.sharedRegistry = registry;
+ }
+ }
+
+ /**
+ * Per-thread benchmark state for isolated data and random access patterns.
+ *
+ * <p>This state class provides each benchmark thread with its own input data
+ * and random number generator to eliminate false sharing and contention on
+ * thread-local operations.
+ *
+ * <h2>Design Rationale</h2>
+ *
+ * <ul>
+ *   <li><b>Thread-local input:</b> Each thread operates on its own Dynamic instance,
+ *       preventing write contention and ensuring independent GC behavior</li>
+ *   <li><b>SplittableRandom:</b> Faster and contention-free compared to
+ *       {@link java.util.Random} which uses atomic CAS operations</li>
+ *   <li><b>Pre-computed indices:</b> Random registry indices are generated during
+ *       setup to avoid RNG overhead during measurement</li>
+ * </ul>
+ *
+ * <h2>Index Buffer Strategy</h2>
+ *
+ * <p>The {@link #registryIndexBuffer} uses a power-of-two size (1024) with
+ * bitwise AND masking for efficient wraparound without modulo operations.
+ * This provides pseudo-random access patterns while minimizing measurement
+ * overhead.
+ */
+ @State(Scope.Thread)
+ public static class ThreadState {
+
+ /**
+ * Size of the pre-computed random index buffer.
+ *
+ *
Power of two (1024) enables efficient wraparound via bitwise AND.
+ * Large enough to avoid pattern repetition affecting cache behavior
+ * during typical measurement windows.
+ */
+ private static final int INDEX_BUFFER_SIZE = 1024;
+
+ /**
+ * Bitmask for efficient modulo operation on buffer index.
+ *
+ *
Used as {@code cursor & INDEX_MASK} instead of {@code cursor % INDEX_BUFFER_SIZE}
+ * for faster wraparound calculation.
+ */
+ private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1;
+
+ /**
+ * Pre-computed random indices for registry lookup benchmarks.
+ *
+ *
Populated during iteration setup with random values in range
+ * [0, registryVersions.length). Accessed via {@link #nextRegistryIndex()}.
+ */
+ private final int[] registryIndexBuffer = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Per-thread input data for migration benchmarks.
+ *
+ *
Regenerated at each iteration to ensure consistent memory allocation
+ * patterns and prevent cross-iteration caching effects.
+ */
+ public Dynamic threadInput;
+
+ /**
+ * Current position in the {@link #registryIndexBuffer}.
+ *
+ *
Incremented on each call to {@link #nextRegistryIndex()} and
+ * wrapped using {@link #INDEX_MASK}.
+ */
+ private int registryCursor;
+
+ /**
+ * Per-thread random number generator.
+ *
+ *
{@link SplittableRandom} is used instead of {@link java.util.Random}
+ * because it is faster and does not use atomic operations, eliminating
+ * contention when multiple threads generate random numbers.
+ */
+ private SplittableRandom random;
+
+ /**
+ * Initializes the per-thread random number generator.
+ *
+ *
Called once per trial. Uses a fixed seed (42) for reproducibility
+ * across benchmark runs, though each thread will produce different
+ * sequences due to {@link SplittableRandom}'s splittable nature.
+ */
+ @Setup(Level.Trial)
+ public void setupTrial() {
+ // Per-thread RNG avoids contention and is faster than java.util.Random.
+ this.random = new SplittableRandom(42L);
+ }
+
+ /**
+ * Regenerates input data and random indices for each iteration.
+ *
+ *
Fresh data generation per iteration ensures:
+ *
+ *
Consistent GC pressure across iterations
+ *
No JIT over-optimization on specific data patterns
+ *
Independent memory allocation per thread
+ *
+ *
+ *
The random index buffer is refilled with new random values to
+ * vary the registry access pattern across iterations.
+ *
+ * @param s the shared benchmark state providing payload size and version array
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration(final BenchmarkState s) {
+ this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, s.payloadSize);
+
+ for (int i = 0; i < INDEX_BUFFER_SIZE; i++) {
+ this.registryIndexBuffer[i] = this.random.nextInt(s.registryVersions.length);
+ }
+ this.registryCursor = 0;
+ }
+
+ /**
+ * Returns the next pre-computed random index for registry lookups.
+ *
+ *
Retrieves the next value from {@link #registryIndexBuffer} and
+ * advances the cursor with efficient bitwise wraparound. This method
+ * is called during measurement and is optimized to minimize overhead.
+ *
+ * @return a random index in range [0, registryVersions.length)
+ */
+ public int nextRegistryIndex() {
+ return this.registryIndexBuffer[this.registryCursor++ & INDEX_MASK];
+ }
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java
new file mode 100644
index 0000000..9b374ee
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Concurrency-focused JMH benchmarks for the Aether DataFixers framework.
+ *
+ *
+ * <p>This package contains benchmarks that measure performance characteristics under
+ * concurrent load. These benchmarks validate thread-safety of the DataFixer system,
+ * identify contention points, and quantify scalability across different thread counts.
+ *
+ * <p>Single-threaded benchmarks measure raw operation performance, but real-world
+ * applications often use the DataFixer system from multiple threads simultaneously.
+ * Concurrent benchmarks reveal:
+ *
+ * <ul>
+ *   <li><b>Lock contention:</b> Synchronization overhead in shared components</li>
+ *   <li><b>Cache coherency effects:</b> Performance impact of shared data access</li>
+ *   <li><b>Scalability limits:</b> Point at which adding threads stops improving throughput</li>
+ *   <li><b>Thread-safety validation:</b> Correctness under concurrent access</li>
+ * </ul>
+ *
+ * <p>The {@link de.splatgames.aether.datafixers.benchmarks.core core} package
+ * measures single-threaded baseline performance. Use concurrent benchmarks to:
+ *
+ * <ul>
+ *   <li>Calculate concurrency overhead: {@code (single-threaded throughput × N threads) / actual throughput}</li>
+ *   <li>Identify scaling efficiency: {@code actual throughput / (single-threaded throughput × N threads)}</li>
+ *   <li>Detect regression: Compare concurrent results across code changes</li>
+ * </ul>
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.core
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.concurrent;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
new file mode 100644
index 0000000..2b3e535
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for chained DataFix application performance.
+ *
+ *
Measures how fix chain length affects migration performance. This benchmark
+ * is essential for understanding the scalability characteristics of the DataFixer
+ * system when applying multiple sequential fixes.
+ *
+ *
+ * <h2>Benchmark Methods</h2>
+ *
+ * <ul>
+ *   <li>{@link #renameChain} - Chain of homogeneous field rename operations</li>
+ *   <li>{@link #mixedChain} - Chain of heterogeneous operations (renames, additions, transformations)</li>
+ *   <li>{@link #partialChain} - Partial chain execution stopping at halfway version</li>
+ * </ul>
+ *
+ * <h2>Parameters</h2>
+ *
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>fixCount</td><td>1, 5, 10, 25, 50</td><td>Number of fixes in the chain</td></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Input data complexity</td></tr>
+ * </table>
+ *
+ * <h2>Benchmark Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ *
+ * <ul>
+ *   <li><b>Linear scaling:</b> Ideal behavior where time scales proportionally with fix count.</li>
+ *   <li><b>Sub-linear scaling:</b> Better than expected, indicates optimization opportunities being exploited.</li>
+ *   <li><b>Super-linear scaling:</b> Indicates potential performance issues with long chains.</li>
+ *   <li><b>Error (±):</b> 99.9% confidence interval. Larger values with more fixes may indicate GC pressure.</li>
+ * </ul>
+ *
+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar MultiFixChainBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar MultiFixChainBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific fix count and payload size
+ * java -jar benchmarks.jar MultiFixChainBenchmark -p fixCount=10 -p payloadSize=SMALL
+ *
+ * # Generate CSV output for analysis
+ * java -jar benchmarks.jar MultiFixChainBenchmark -rf csv -rff chain_results.csv
+ * }
+ *
+ * @author Erik Pförtner
+ * @see SingleFixBenchmark
+ * @see BenchmarkBootstrap#createChainFixer(int)
+ * @see BenchmarkBootstrap#createMixedFixer(int)
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class MultiFixChainBenchmark {
+
+ /**
+ * The number of fixes in the chain, injected by JMH.
+ *
+ *
This parameter controls the length of the fix chain being benchmarked.
+ * Higher values test the system's ability to handle long migration paths
+ * efficiently.
+ *
+ *
+ *
1: Baseline single-fix performance (compare with {@link SingleFixBenchmark})
+ *
5: Short chain typical of minor version updates
+ *
10: Medium chain representing moderate version gaps
+ *
25: Long chain simulating significant version jumps
+ *
50: Stress test for extended migration paths
+ *
+ */
+ @Param({"1", "5", "10", "25", "50"})
+ private int fixCount;
+
+ /**
+ * The payload size parameter, injected by JMH.
+ *
+ *
Controls the complexity of generated test data. Only SMALL and MEDIUM
+ * sizes are used to keep benchmark runtime reasonable while still capturing
+ * scaling behavior.
+ *
+ * @see PayloadSize
+ */
+ @Param({"SMALL", "MEDIUM"})
+ private PayloadSize payloadSize;
+
+ /**
+ * DataFixer configured with a chain of homogeneous field rename fixes.
+ *
+ *
Each fix in the chain performs a simple field rename operation (v{@code n} → v{@code n+1}).
+ * This represents the best-case scenario for chain execution.
+ */
+ private DataFixer chainFixer;
+
+ /**
+ * DataFixer configured with a chain of heterogeneous fix operations.
+ *
+ * <p>The chain includes a mix of rename, add, and transform operations to
+ * simulate realistic migration scenarios. Falls back to {@link #chainFixer}
+ * if mixed fixer creation fails.
+ */
+ private DataFixer mixedFixer;
+
+ /**
+ * Input data for migration benchmarks.
+ *
+ * <p>Regenerated at each iteration to ensure consistent GC behavior
+ * and avoid caching effects.
+ */
+ private Dynamic<JsonElement> input;
+
+ /**
+ * Source version for migrations (always v1).
+ */
+ private DataVersion fromVersion;
+
+ /**
+ * Target version for full chain migrations (v{@link #fixCount} + 1).
+ */
+ private DataVersion toVersion;
+
+ /**
+ * Target version for partial chain migrations (approximately half of {@link #toVersion}).
+ *
+ *
Used by {@link #partialChain} to measure performance when only part
+ * of the available fixes are applied.
+ */
+ private DataVersion halfwayToVersion;
+
+ /**
+ * Initializes the benchmark state once per trial.
+ *
+ *
Creates the chain and mixed fixers based on the current {@link #fixCount}
+ * parameter. Also calculates the version bounds for full and partial chain
+ * execution.
+ *
+ *
If mixed fixer creation fails (e.g., due to unsupported operations),
+ * the chain fixer is used as a fallback to ensure the benchmark can still run.
+ */
+ @Setup(Level.Trial)
+ public void setupTrial() {
+ this.chainFixer = BenchmarkBootstrap.createChainFixer(this.fixCount);
+
+ try {
+ this.mixedFixer = BenchmarkBootstrap.createMixedFixer(this.fixCount);
+ } catch (final RuntimeException ex) {
+ this.mixedFixer = this.chainFixer;
+ }
+
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(this.fixCount + 1);
+
+ final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1);
+ this.halfwayToVersion = new DataVersion(halfwayVersion);
+ }
+
+ /**
+ * Regenerates input data at each iteration.
+ *
+ *
Fresh data generation per iteration ensures that:
+ *
+ *
GC behavior is consistent across iterations
+ *
JIT optimizations don't over-specialize on specific data patterns
+ *
Memory allocation patterns are representative of real usage
+ *
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration() {
+ this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize);
+ }
+
+ /**
+ * Benchmarks a chain of homogeneous field rename operations.
+ *
+ *
Measures the performance of applying {@link #fixCount} sequential rename
+ * fixes to migrate data from v1 to v{@code fixCount+1}. This represents an
+ * optimistic scenario where all fixes perform the same lightweight operation.
+ *
+ *
Use this benchmark to establish baseline chain performance and detect
+ * any non-linear scaling behavior in the fix application pipeline.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void renameChain(final Blackhole blackhole) {
+ final Dynamic result = this.chainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks a chain of heterogeneous fix operations.
+ *
+ *
Measures the performance of applying {@link #fixCount} sequential fixes
+ * that include a mix of operations:
+ *
+ *
Field renames
+ *
Field additions with default values
+ *
Field transformations (type conversions, value mappings)
+ *
+ *
+ *
This benchmark provides a more realistic performance profile compared
+ * to {@link #renameChain}, as real-world migrations typically involve
+ * diverse operations.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void mixedChain(final Blackhole blackhole) {
+ final Dynamic result = this.mixedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks partial chain execution stopping at halfway version.
+ *
+ *
Measures the performance of applying only half of the available fixes
+ * in the chain. This simulates scenarios where:
+ *
+ *
Data is migrated incrementally rather than to the latest version
+ *
Target version is not the most recent available
+ *
Partial upgrades are performed for compatibility reasons
+ *
+ *
+ *
Comparing this benchmark with {@link #renameChain} reveals whether
+ * fix selection and version range calculations add significant overhead.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void partialChain(final Blackhole blackhole) {
+ final Dynamic result = this.chainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.halfwayToVersion
+ );
+ blackhole.consume(result);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
new file mode 100644
index 0000000..0b72395
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.schema.Schema;
+import de.splatgames.aether.datafixers.api.schema.SchemaRegistry;
+import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry;
+import de.splatgames.aether.datafixers.testkit.factory.MockSchemas;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.SplittableRandom;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for schema registry lookup performance.
+ *
+ *
Measures the overhead of various schema lookup operations as registry size grows.
+ * Schema lookups are performed frequently during data migration, so their performance directly impacts overall
+ * migration throughput.
+ *
+ *
+ * <h2>Benchmark Methods</h2>
+ *
+ * <ul>
+ *   <li>{@link #exactLookup} - Direct lookup by exact version match</li>
+ *   <li>{@link #floorLookup} - Floor lookup finding closest version ≤ target</li>
+ *   <li>{@link #latestLookup} - Retrieval of the most recent schema</li>
+ *   <li>{@link #sequentialLookup} - Sequential traversal of all registered versions</li>
+ * </ul>
+ *
+ * <h2>Parameters</h2>
+ *
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>schemaCount</td><td>10, 50, 100, 500</td><td>Number of schemas in the registry</td></tr>
+ * </table>
+ *
+ * <h2>Benchmark Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Nanoseconds</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ *
+ * <ul>
+ *   <li><b>O(1) lookups:</b> {@link #exactLookup} and {@link #latestLookup} should show constant time regardless of registry size.</li>
+ *   <li><b>O(log n) lookups:</b> {@link #floorLookup} may show logarithmic scaling if implemented via binary search.</li>
+ *   <li><b>O(n) lookups:</b> {@link #sequentialLookup} should scale linearly with schema count.</li>
+ *   <li><b>Cache effects:</b> Larger registries may show increased lookup time due to CPU cache pressure.</li>
+ * </ul>
+ *
+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SchemaLookupBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific schema count only
+ * java -jar benchmarks.jar SchemaLookupBenchmark -p schemaCount=100
+ *
+ * # Run specific lookup benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark.exactLookup
+ * }</pre>
+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class SchemaLookupBenchmark {
+
+ /**
+ * Benchmarks exact version lookup performance.
+ *
+ * <p>Measures the time to retrieve a schema by its exact registered version.
+ * This is the most common lookup pattern during migration when the source version is known precisely.
+ *
+ *
The benchmark uses pre-generated random indices to avoid RNG overhead
+ * in the measurement loop. Each invocation looks up a different random version to prevent branch prediction
+ * optimization.
+ *
+ * @param s the shared benchmark state containing the registry and versions
+ * @param t the per-thread state providing random lookup indices
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void exactLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextExactIndex();
+ final Schema schema = s.registry.get(s.versions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks floor lookup performance.
+ *
+ *
Measures the time to retrieve a schema using floor semantics, where
+ * the registry returns the schema with the highest version ≤ the requested version. This pattern is used when
+ * data may be at intermediate versions not explicitly registered.
+ *
+ *
The lookup versions include both exact matches (10, 20, 30, ...) and
+ * in-between values (5, 15, 25, ...) to exercise both fast-path exact matches and slower floor searches.
+ *
+ * @param s the shared benchmark state containing the registry and lookup versions
+ * @param t the per-thread state providing random lookup indices
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void floorLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextFloorIndex();
+ final Schema schema = s.registry.get(s.lookupVersions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks latest schema retrieval performance.
+ *
+ *
Measures the time to retrieve the most recent schema from the registry.
+ * This operation should be O(1) as the latest schema is typically cached or stored in a dedicated field.
+ *
+ *
This benchmark serves as a baseline for the fastest possible lookup
+ * operation and helps identify any unexpected overhead in the registry implementation.
+ *
+ * @param s the shared benchmark state containing the registry
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void latestLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ final Schema schema = s.registry.latest();
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks sequential lookup of all registered schemas.
+ *
+ *
Measures the aggregate time to look up every schema in the registry
+ * in version order. This pattern occurs during schema validation, debugging, or when building migration path
+ * analyses.
+ *
+ *
Note: This benchmark performs multiple lookups per invocation
+ * ({@code schemaCount} lookups). The reported time is for the entire sequence, not per-lookup. Divide by
+ * {@code schemaCount} to get per-lookup overhead.
+ *
+ * @param s the shared benchmark state containing the registry and versions
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void sequentialLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ for (final DataVersion version : s.versions) {
+ final Schema schema = s.registry.get(version);
+ blackhole.consume(schema);
+ }
+ }
+
+ /**
+ * Shared JMH state containing the schema registry and version arrays.
+ *
+ *
This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The registry is populated with mock schemas at versions 10, 20, 30, ... up to
+ * {@code schemaCount * 10}.
+ *
+ *
The registry is frozen after setup to match production usage patterns
+ * where registries are immutable during normal operation.
+ */
+ @State(Scope.Benchmark)
+ public static class BenchmarkState {
+
+ /**
+ * The number of schemas to register, injected by JMH.
+ *
+ *
Controls the size of the schema registry to measure lookup
+ * performance scaling:
+ *
+ *
10: Small registry, fits entirely in L1 cache
+ *
50: Medium registry, typical for most applications
+ *
100: Large registry, may exceed L1 cache
+ *
500: Stress test for registry scalability
+ *
+ */
+ @Param({"10", "50", "100", "500"})
+ public int schemaCount;
+
+ /**
+ * The frozen schema registry containing all registered schemas.
+ */
+ public SchemaRegistry registry;
+
+ /**
+ * Array of exact registered versions (10, 20, 30, ...).
+ *
+ *
Used by {@link #exactLookup} to ensure lookups always hit
+ * registered versions.
+ */
+ public DataVersion[] versions;
+
+ /**
+ * Array of lookup versions including in-between values (5, 10, 15, 20, ...).
+ *
+ *
Used by {@link #floorLookup} to exercise both exact matches
+ * and floor search behavior.
+ */
+ public DataVersion[] lookupVersions;
+
+ /**
+ * Initializes the schema registry and version arrays once per trial.
+ *
+ *
Creates a {@link SimpleSchemaRegistry} populated with minimal mock
+ * schemas at regular version intervals. The registry is frozen after population to enable any internal
+ * optimizations.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry();
+ this.versions = new DataVersion[this.schemaCount];
+
+ for (int i = 0; i < this.schemaCount; i++) {
+ final int version = (i + 1) * 10;
+ final DataVersion dataVersion = new DataVersion(version);
+ this.versions[i] = dataVersion;
+ simpleRegistry.register(MockSchemas.minimal(version));
+ }
+
+ simpleRegistry.freeze();
+ this.registry = simpleRegistry;
+
+ this.lookupVersions = new DataVersion[this.schemaCount * 2];
+ for (int i = 0; i < this.lookupVersions.length; i++) {
+ this.lookupVersions[i] = new DataVersion((i + 1) * 5);
+ }
+ }
+ }
+
+ /**
+ * Per-thread JMH state providing pre-generated random lookup indices.
+ *
+ *
Random number generation is expensive and would dominate the benchmark
+ * if performed in the hot path. This state pre-generates buffers of random indices during setup, allowing the
+ * benchmark methods to retrieve indices via simple array access and bit masking.
+ *
+ *
Each thread has its own state instance ({@link Scope#Thread}) to avoid
+ * contention on shared RNG state. The fixed seed ensures reproducible results across benchmark runs.
+ *
+ * @see BenchmarkState
+ */
+ @State(Scope.Thread)
+ public static class ThreadState {
+
+ /**
+ * Size of the pre-generated index buffer.
+ *
+ *
Power-of-two size enables cheap index wrapping via bit masking
+ * instead of modulo operation.
+ */
+ private static final int INDEX_BUFFER_SIZE = 1024;
+
+ /**
+ * Bit mask for wrapping cursor to buffer bounds ({@code INDEX_BUFFER_SIZE - 1}).
+ */
+ private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1;
+
+ /**
+ * Pre-generated indices into {@link BenchmarkState#versions}.
+ */
+ private final int[] exactIndices = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Pre-generated indices into {@link BenchmarkState#lookupVersions}.
+ */
+ private final int[] floorIndices = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Current position in {@link #exactIndices}.
+ */
+ private int exactCursor;
+
+ /**
+ * Current position in {@link #floorIndices}.
+ */
+ private int floorCursor;
+
+ /**
+ * Thread-local random number generator for index generation.
+ */
+ private SplittableRandom random;
+
+ /**
+ * Initializes the random number generator once per trial.
+ *
+ *
Uses a fixed seed (42) for reproducibility. Each thread gets its
+ * own {@link SplittableRandom} instance to avoid synchronization overhead.
+ */
+ @Setup(Level.Trial)
+ public void setupTrial() {
+ this.random = new SplittableRandom(42L);
+ }
+
+ /**
+ * Refills the index buffers at each iteration.
+ *
+ *
Generates fresh random indices based on the current
+ * {@link BenchmarkState#schemaCount} parameter. Resets cursors to the beginning of each buffer.
+ *
+ * @param s the shared benchmark state providing array bounds
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration(final BenchmarkState s) {
+ for (int i = 0; i < INDEX_BUFFER_SIZE; i++) {
+ this.exactIndices[i] = this.random.nextInt(s.versions.length);
+ this.floorIndices[i] = this.random.nextInt(s.lookupVersions.length);
+ }
+ this.exactCursor = 0;
+ this.floorCursor = 0;
+ }
+
+ /**
+ * Returns the next random index for exact version lookup.
+ *
+ *
Uses bit masking to wrap around the buffer efficiently.
+ *
+ * @return a random index into {@link BenchmarkState#versions}
+ */
+ public int nextExactIndex() {
+ return this.exactIndices[this.exactCursor++ & INDEX_MASK];
+ }
+
+ /**
+ * Returns the next random index for floor version lookup.
+ *
+ *
Uses bit masking to wrap around the buffer efficiently.
+ *
+ * @return a random index into {@link BenchmarkState#lookupVersions}
+ */
+ public int nextFloorIndex() {
+ return this.floorIndices[this.floorCursor++ & INDEX_MASK];
+ }
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
new file mode 100644
index 0000000..c74d288
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for single DataFix application performance.
+ *
+ *
Measures the overhead of applying a single fix to data of varying sizes.
+ * Includes a baseline identity fix measurement to isolate framework overhead from actual transformation costs.
+ *
+ *
Benchmark Methods
+ *
+ *
{@link #identityFix} - Baseline measurement with no-op transformation
+ *
{@link #singleRenameFix} - Single field rename operation
+ *
{@link #playerDataFix} - Complex object transformation with codec roundtrip
+ *
{@link #playerDataFixEndToEnd} - Full pipeline including setup overhead
+ *
+ *
+ *
Benchmark Configuration
+ *
+ *
Setting
Value
+ *
Warmup
5 iterations, 1 second each
+ *
Measurement
10 iterations, 1 second each
+ *
Forks
2 (for statistical significance)
+ *
JVM Heap
2 GB min/max
+ *
Time Unit
Microseconds
+ *
+ *
+ *
Interpreting Results
+ *
+ *
Throughput (ops/us): Higher is better. Operations per microsecond.
+ *
Average Time (us/op): Lower is better. Microseconds per operation.
+ *
Error (±): 99.9% confidence interval. Smaller means more stable results.
+ *
+ *
+ *
Usage
+ *
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SingleFixBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SingleFixBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific payload size only
+ * java -jar benchmarks.jar SingleFixBenchmark -p payloadSize=SMALL
+ * }
+ * <p>Measures the performance of renaming one field in the input data.
+ * This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize}
+ * to measure scaling behavior.
+ *
+ * @param s the shared benchmark state containing fixer and input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void singleRenameFix(final SizedState s,
+ final Blackhole blackhole) {
+ blackhole.consume(s.fixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ s.input,
+ s.fromVersion,
+ s.toVersion));
+ }
+
+ /**
+ * Benchmarks the identity (no-op) fix as a baseline measurement.
+ *
+ * <p>Measures pure framework overhead without any actual data transformation.
+ * Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other
+ * benchmark results.
+ *
+ * @param s the shared benchmark state containing identity fixer and input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void identityFix(final SizedState s,
+ final Blackhole blackhole) {
+ blackhole.consume(s.identityFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ s.input,
+ s.fromVersion,
+ s.toVersion));
+ }
+
+ /**
+ * Benchmarks a complex player data transformation with codec roundtrip.
+ *
+ * <p>Measures the performance of a realistic migration scenario where data
+ * is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex
+ * object transformations.
+ *
+ * <p>This benchmark is expected to be significantly slower than {@link #singleRenameFix}
+ * because codec roundtrips involve reflection, object instantiation, and full serialization/deserialization
+ * cycles.
+ *
+ * @param s the shared player benchmark state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void playerDataFix(final PlayerState s,
+ final Blackhole blackhole) {
+ blackhole.consume(s.playerFixer.update(
+ BenchmarkBootstrap.PLAYER_TYPE,
+ s.playerInput,
+ s.fromVersion,
+ s.toVersion));
+ }
+
+ /**
+ * Benchmarks the complete end-to-end pipeline including setup overhead.
+ *
+ * <p>Measures the total cost of a migration including:
+ *
+ * <ul>
+ *   <li>Test data generation</li>
+ *   <li>DataFixer bootstrap and initialization</li>
+ *   <li>Actual migration execution</li>
+ * </ul>
+ *
+ * <p>This benchmark is useful for understanding cold-start performance
+ * and the cost of creating new DataFixer instances. In production code,
+ * DataFixers should be reused rather than recreated per-operation.
+ *
+ * <p>Note: Results will be significantly slower than {@link #playerDataFix}
+ * due to setup overhead included in each iteration.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void playerDataFixEndToEnd(final Blackhole blackhole) {
+ final Dynamic playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ blackhole.consume(playerFixer.update(
+ BenchmarkBootstrap.PLAYER_TYPE,
+ playerInput,
+ new DataVersion(1),
+ new DataVersion(2)));
+ }
+
+ /**
+ * Shared JMH state for benchmarks parameterized by payload size.
+ *
+ *
+ * <p>This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The {@link #payloadSize} parameter controls the complexity of test data:
+ *
+ *
+ *
SMALL: 5 fields, 2 nesting levels, 10 array elements
+ *
MEDIUM: 20 fields, 4 nesting levels, 100 array elements
+ *
LARGE: 50 fields, 6 nesting levels, 1000 array elements
+ *
+ *
+ * @see PayloadSize
+ */
+ @State(Scope.Benchmark)
+ public static class SizedState {
+
+ /**
+ * The payload size parameter, injected by JMH. Controls the complexity of generated test data.
+ */
+ @Param({"SMALL", "MEDIUM", "LARGE"})
+ public PayloadSize payloadSize;
+
+ /**
+ * DataFixer configured with a single field rename fix (v1 → v2).
+ */
+ public DataFixer fixer;
+
+ /**
+ * DataFixer configured with an identity (no-op) fix for baseline measurement.
+ */
+ public DataFixer identityFixer;
+
+ /**
+ * Pre-generated input data matching {@link #payloadSize}.
+ */
+ public Dynamic input;
+
+ /**
+ * Source version for migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Initializes the benchmark state once per trial.
+ *
+ *
+ * <p>Creates fixers and generates test data based on the current
+ * {@link #payloadSize} parameter value.
+ * <p>This state is separate from {@link SizedState} because the player benchmark
+ * uses a fixed, realistic data structure rather than parameterized payload sizes. The player data simulates a
+ * typical game entity with nested objects, arrays, and various field types.
+ *
+ *
+ * <p>The player fix performs a complete codec roundtrip transformation,
+ * making it representative of real-world migration scenarios where data is decoded, transformed, and
+ * re-encoded.
+ *
+ * @see BenchmarkBootstrap#createPlayerFixer()
+ * @see BenchmarkDataGenerator#generatePlayerData
+ */
+ @State(Scope.Benchmark)
+ public static class PlayerState {
+
+ /**
+ * DataFixer configured with a player-specific transformation fix. Performs codec decode → transform → encode
+ * cycle.
+ */
+ public DataFixer playerFixer;
+
+ /**
+ * Pre-generated player data structure with realistic game entity fields.
+ */
+ public Dynamic playerInput;
+
+ /**
+ * Source version for migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Initializes the player benchmark state once per trial.
+ *
+ * <p>Creates the player fixer and generates realistic player test data.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ this.playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ this.playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(2);
+ }
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
new file mode 100644
index 0000000..32b058f
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Core JMH benchmarks for the Aether DataFixers framework.
+ *
+ *
+ * <p>This package contains benchmarks that measure the fundamental performance characteristics
+ * of the data fixer system, including fix application, chain execution, and schema registry
+ * operations. These benchmarks form the foundation for performance regression testing and
+ * optimization efforts.
+ * <ul>
+ *   <li>Isolation: Each benchmark measures a single operation to isolate performance characteristics.</li>
+ *   <li>Parameterization: Benchmarks are parameterized to capture scaling behavior across different input sizes.</li>
+ *   <li>Reproducibility: Fixed seeds and deterministic data generation ensure reproducible results.</li>
+ *   <li>JMH Best Practices: All benchmarks follow JMH guidelines including proper use of {@code Blackhole},
+ *       state scoping, and setup level annotations.</li>
+ * </ul>
+ *
+ *
+ *
+ * <h2>Interpreting Results</h2>
+ *
+ * <p>All benchmarks in this package report both throughput (ops/time) and average time (time/op).
+ * When comparing results:
+ *
+ * <ul>
+ *   <li>Compare measurements from the same JVM version and hardware</li>
+ *   <li>Consider the 99.9% confidence interval (error bounds)</li>
+ *   <li>Run multiple forks to account for JIT compilation variance</li>
+ *   <li>Use baseline benchmarks (e.g., identity fix) to isolate framework overhead</li>
+ * </ul>
+ *
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.core;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
new file mode 100644
index 0000000..ac0bce9
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.format;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
+import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for cross-format conversion performance between DynamicOps implementations.
+ *
+ *
+ * <p>This benchmark measures the overhead of converting data between different
+ * serialization formats using the {@code DynamicOps.convertTo()} mechanism. Cross-format
+ * conversion is essential when integrating systems that use different data formats
+ * or when migrating data through format-agnostic DataFixers.
+ *
+ *
Conversion Pairs Benchmarked
+ *
+ *
JSON Library Conversions
+ *
+ *
{@link #gsonToJackson} - Gson JsonElement → Jackson JsonNode
+ *
{@link #jacksonToGson} - Jackson JsonNode → Gson JsonElement
+ * <p>Measures the overhead of converting between two JSON libraries.
+ * Both represent JSON but use different internal tree structures.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void gsonToJackson(final Blackhole blackhole) {
+ final JsonNode result = this.jacksonJsonOps.convertTo(this.gsonOps, this.gsonRoot);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks conversion from Jackson JsonNode to Gson JsonElement.
+ *
+ * <p>Measures the reverse conversion from Jackson to Gson representation.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void jacksonToGson(final Blackhole blackhole) {
+ final JsonElement result = this.gsonOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot);
+ blackhole.consume(result);
+ }
+
+ // ==================== Gson <-> SnakeYAML Conversions ====================
+
+ /**
+ * Benchmarks conversion from Gson JsonElement to SnakeYAML native types.
+ *
+ * <p>Measures cross-ecosystem conversion from JSON library to YAML library.
+ * SnakeYAML uses native Java Maps and Lists internally.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void gsonToSnakeYaml(final Blackhole blackhole) {
+ final Object result = this.snakeYamlOps.convertTo(this.gsonOps, this.gsonRoot);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks conversion from SnakeYAML native types to Gson JsonElement.
+ *
+ * <p>Measures cross-ecosystem conversion from YAML native types to JSON tree.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void snakeYamlToGson(final Blackhole blackhole) {
+ final JsonElement result = this.gsonOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot);
+ blackhole.consume(result);
+ }
+
+ // ==================== Jackson JSON <-> Jackson YAML Conversions ====================
+
+ /**
+ * Benchmarks conversion from Jackson JSON to Jackson YAML.
+ *
+ * <p>Measures conversion within the Jackson ecosystem. Both formats use
+ * JsonNode internally, potentially enabling optimizations.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void jacksonJsonToYaml(final Blackhole blackhole) {
+ final JsonNode result = this.jacksonYamlOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks conversion from Jackson YAML to Jackson JSON.
+ *
+ * <p>Measures reverse conversion within the Jackson ecosystem.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void jacksonYamlToJson(final Blackhole blackhole) {
+ final JsonNode result = this.jacksonJsonOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot);
+ blackhole.consume(result);
+ }
+
+ // ==================== SnakeYAML <-> Jackson YAML Conversions ====================
+
+ /**
+ * Benchmarks conversion from SnakeYAML native types to Jackson YAML JsonNode.
+ *
+ * <p>Measures conversion between two YAML libraries with different internal
+ * representations (native Java types vs JsonNode).
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void snakeYamlToJacksonYaml(final Blackhole blackhole) {
+ final JsonNode result = this.jacksonYamlOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks conversion from Jackson YAML JsonNode to SnakeYAML native types.
+ *
+ * <p>Measures reverse conversion from JsonNode to native Java Maps/Lists.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void jacksonYamlToSnakeYaml(final Blackhole blackhole) {
+ final Object result = this.snakeYamlOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot);
+ blackhole.consume(result);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
new file mode 100644
index 0000000..1a87c58
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.format;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.jetbrains.annotations.Nullable;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark comparing JSON DynamicOps implementations: Gson vs Jackson.
+ *
+ *
+ * <p>This benchmark measures the performance of JSON-based operations using two
+ * different underlying libraries: Google Gson ({@link GsonOps}) and Jackson Databind
+ * ({@link JacksonJsonOps}). The results help determine which implementation is more
+ * suitable for specific use cases.
+ *
+ *
Benchmark Categories
+ *
+ *
Data Generation
+ *
Measure Dynamic object construction performance:
+ *
+ *
{@link #gsonGenerate} - Create Dynamic using GsonOps
+ *
{@link #jacksonGenerate} - Create Dynamic using JacksonJsonOps
+ *
+ *
+ *
Field Access
+ *
Measure field read operations on existing data:
+ *
+ *
{@link #gsonFieldRead} - Read field from Gson-backed Dynamic
+ *
{@link #jacksonFieldRead} - Read field from Jackson-backed Dynamic
+ *
+ *
+ *
Field Modification
+ *
Measure field write/set operations:
+ *
+ *
{@link #gsonFieldSet} - Set field on Gson-backed Dynamic
+ *
{@link #jacksonFieldSet} - Set field on Jackson-backed Dynamic
+ *
+ *
+ *
Migration
+ *
Measure DataFixer migration performance:
+ *
+ *
{@link #gsonMigration} - Apply fix to Gson-backed data
+ *
{@link #jacksonMigration} - Apply fix to Jackson-backed data
Measures the time to apply a single fix migration to Gson-based
+ * Dynamic data. This represents the typical migration scenario where
+ * both fixer and data use the same DynamicOps implementation.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void gsonMigration(final Blackhole blackhole) {
+ final Dynamic result = this.gsonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.gsonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks DataFixer migration on Jackson-backed data.
+ *
+ * <p>If a dedicated Jackson fixer is available, measures native Jackson
+ * migration. Otherwise, falls back to cross-format migration using the
+ * Gson-based fixer with Jackson input data.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void jacksonMigration(final Blackhole blackhole) {
+ if (this.jacksonFixer == null) {
+ // No dedicated Jackson fixer available -> this would not be a fair "Jackson migration" benchmark.
+ // Measure the cross-format behavior explicitly instead.
+ final Dynamic result = this.gsonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.jacksonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ return;
+ }
+
+ final Dynamic result = this.jacksonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.jacksonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks cross-format migration with Jackson input and Gson-based fixer.
+ *
+ * <p>Measures the performance overhead when the fixer's DynamicOps differs
+ * from the input data's DynamicOps. This scenario is common when migrating
+ * data from various sources through a centralized fixer.
+ *
+ * <p>Comparing this benchmark with {@link #gsonMigration} reveals the
+ * overhead of format conversion during migration.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void crossFormatMigrationJacksonInput(final Blackhole blackhole) {
+ final Dynamic result = this.gsonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.jacksonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
new file mode 100644
index 0000000..2dc134c
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.format;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps;
+import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for TOML and XML DynamicOps implementations via Jackson.
+ *
+ *
+ * <p>This benchmark measures the performance of TOML and XML format operations
+ * using Jackson-based implementations ({@link JacksonTomlOps} and {@link JacksonXmlOps}).
+ * Both formats share Jackson's unified API, enabling direct performance comparison.
+ *
+ *
Benchmark Categories
+ *
+ *
Data Generation
+ *
Measure Dynamic object construction performance:
+ *
+ *
{@link #tomlGenerate} - Create Dynamic using JacksonTomlOps
+ *
{@link #xmlGenerate} - Create Dynamic using JacksonXmlOps
+ *
+ *
+ *
Field Access
+ *
Measure field read operations on existing data:
+ *
+ *
{@link #tomlFieldRead} - Read field from TOML-backed Dynamic
+ *
{@link #xmlFieldRead} - Read field from XML-backed Dynamic
+ *
+ *
+ *
Field Modification
+ *
Measure field write/set operations:
+ *
+ *
{@link #tomlFieldSet} - Set field on TOML-backed Dynamic
+ *
{@link #xmlFieldSet} - Set field on XML-backed Dynamic
+ *
+ *
+ *
Migration
+ *
Measure DataFixer migration performance:
+ *
+ *
{@link #tomlMigration} - Apply fix to TOML-backed data
+ *
{@link #xmlMigration} - Apply fix to XML-backed data
+ *
+ *
+ *
Implementations
+ *
+ *
Implementation
Library
Node Type
Use Case
+ *
+ *
{@link JacksonTomlOps}
+ *
Jackson Dataformat TOML
+ *
{@code JsonNode}
+ *
Configuration files, Rust ecosystem integration
+ *
+ *
+ *
{@link JacksonXmlOps}
+ *
Jackson Dataformat XML
+ *
{@code JsonNode}
+ *
Legacy systems, SOAP/REST APIs, document formats
+ *
+ *
+ *
+ *
Parameters
+ *
+ *
Parameter
Values
Description
+ *
payloadSize
SMALL, MEDIUM
Test data complexity (LARGE excluded for performance)
+ *
+ *
+ *
+ * <p>Note: The LARGE payload size is excluded from this benchmark because
+ * TOML and XML serialization typically have higher overhead than JSON/YAML,
+ * making large payloads impractical for typical use cases.
+ * <p>Measures the time to apply a single fix migration to TOML-based
+ * Dynamic data.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void tomlMigration(final Blackhole blackhole) {
+ final Dynamic result = this.fixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.tomlData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks DataFixer migration on XML-backed data.
+ *
+ * <p>Measures the time to apply a single fix migration to XML-based
+ * Dynamic data.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void xmlMigration(final Blackhole blackhole) {
+ final Dynamic result = this.fixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.xmlData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
new file mode 100644
index 0000000..c0f2862
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.format;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
+import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark comparing YAML DynamicOps implementations: SnakeYAML vs Jackson YAML.
+ *
+ * <p>This benchmark measures the performance of YAML-based operations using two
+ * different underlying libraries: SnakeYAML ({@link SnakeYamlOps}) and Jackson YAML
+ * ({@link JacksonYamlOps}). YAML is commonly used for configuration files and
+ * human-readable data serialization.
+ *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Data Generation</h3>
+ * <p>Measure Dynamic object construction performance:
+ * <ul>
+ *   <li>{@link #snakeYamlGenerate} - Create Dynamic using SnakeYamlOps</li>
+ *   <li>{@link #jacksonYamlGenerate} - Create Dynamic using JacksonYamlOps</li>
+ * </ul>
+ *
+ * <h3>Field Access</h3>
+ * <p>Measure field read operations on existing data:
+ * <ul>
+ *   <li>{@link #snakeYamlFieldRead} - Read field from SnakeYAML-backed Dynamic</li>
+ *   <li>{@link #jacksonYamlFieldRead} - Read field from Jackson YAML-backed Dynamic</li>
+ * </ul>
+ *
+ * <h3>Field Modification</h3>
+ * <p>Measure field write/set operations:
+ * <ul>
+ *   <li>{@link #snakeYamlFieldSet} - Set field on SnakeYAML-backed Dynamic</li>
+ *   <li>{@link #jacksonYamlFieldSet} - Set field on Jackson YAML-backed Dynamic</li>
+ * </ul>
+ *
+ * <h3>Migration</h3>
+ * <p>Measure DataFixer migration performance:
+ * <ul>
+ *   <li>{@link #snakeYamlMigration} - Apply fix to SnakeYAML-backed data</li>
+ *   <li>{@link #jacksonYamlMigration} - Apply fix to Jackson YAML-backed data</li>
+ * </ul>
+ *
+ * <h2>Implementations Compared</h2>
+ * <table>
+ *   <tr><th>Implementation</th><th>Library</th><th>Node Type</th><th>Characteristics</th></tr>
+ *   <tr>
+ *     <td>{@link SnakeYamlOps}</td>
+ *     <td>SnakeYAML</td>
+ *     <td>{@code Object} (native Java types)</td>
+ *     <td>Native YAML library, uses Maps/Lists, anchors &amp; aliases support</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link JacksonYamlOps}</td>
+ *     <td>Jackson Dataformat YAML</td>
+ *     <td>{@code JsonNode}</td>
+ *     <td>Unified Jackson API, shares code with JSON, streaming support</td>
+ *   </tr>
+ * </table>
+ *
+ * <h2>Parameters</h2>
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM, LARGE</td><td>Test data complexity</td></tr>
+ * </table>
+ *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all YAML benchmarks
+ * java -jar benchmarks.jar YamlBenchmark
+ *
+ * # Compare only generation performance
+ * java -jar benchmarks.jar "YamlBenchmark.*Generate"
+ *
+ * # Run SnakeYAML-only benchmarks
+ * java -jar benchmarks.jar "YamlBenchmark.snakeYaml.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar YamlBenchmark -p payloadSize=MEDIUM
+ * }