diff --git a/.gitignore b/.gitignore index 5232b33..dcc6e4a 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,6 @@ bin/ # Claude Code /.claude/ /CLAUDE.md + +# GitHub +current-ticket.md \ No newline at end of file diff --git a/aether-datafixers-benchmarks/pom.xml b/aether-datafixers-benchmarks/pom.xml new file mode 100644 index 0000000..ef8e7ba --- /dev/null +++ b/aether-datafixers-benchmarks/pom.xml @@ -0,0 +1,174 @@ + + + 4.0.0 + + + de.splatgames.aether.datafixers + aether-datafixers + 1.0.0-SNAPSHOT + + + aether-datafixers-benchmarks + jar + + Aether Datafixers :: Benchmarks + JMH microbenchmarks for Aether Datafixers performance analysis. + + + + true + true + + true + + de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner + + + + + + de.splatgames.aether.datafixers + aether-datafixers-api + + + de.splatgames.aether.datafixers + aether-datafixers-core + + + de.splatgames.aether.datafixers + aether-datafixers-codec + + + de.splatgames.aether.datafixers + aether-datafixers-testkit + + + + + org.openjdk.jmh + jmh-core + + + org.openjdk.jmh + jmh-generator-annprocess + provided + + + + + com.google.code.gson + gson + + + com.fasterxml.jackson.core + jackson-databind + + + + + org.yaml + snakeyaml + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-toml + + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + + + + + com.google.guava + guava + + + + + org.jetbrains + annotations + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + + + org.apache.maven.plugins + maven-shade-plugin + ${plugin.shade.version} + + + package + + shade + + + + + org.openjdk.jmh.Main + + + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + META-INF/MANIFEST.MF + + + + false + true + benchmarks + + + + + + + + org.codehaus.mojo + 
exec-maven-plugin + 3.1.0 + + ${main.class} + + + + + diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java new file mode 100644 index 0000000..beb5766 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks; + +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.io.IOException; + +/** + * Main entry point for running Aether Datafixers JMH benchmarks. + * + *

This class provides both a command-line interface and programmatic API for + * executing benchmarks. It supports all standard JMH options while providing + * convenient preset configurations for common benchmark scenarios.

+ * + *

Execution Methods

+ * + *

Via Maven exec:java (Development)

+ *

Quick way to run benchmarks during development without building a JAR:

+ *
{@code
+ * # Run all benchmarks with default settings
+ * mvn exec:java -pl aether-datafixers-benchmarks
+ *
+ * # Run with JMH arguments
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="-h"
+ *
+ * # Run specific benchmark pattern
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
+ * }
+ * + *

Via Fat JAR (Production)

+ *

Recommended for production benchmark runs with full JMH isolation:

+ *
{@code
+ * # Build the fat JAR
+ * mvn clean package -pl aether-datafixers-benchmarks -DskipTests
+ *
+ * # Run all benchmarks
+ * java -jar aether-datafixers-benchmarks/target/aether-datafixers-benchmarks-*-benchmarks.jar
+ *
+ * # Run specific benchmark
+ * java -jar target/*-benchmarks.jar SingleFixBenchmark
+ *
+ * # Run with custom parameters
+ * java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 3 -i 5 -f 1
+ *
+ * # Output JSON results for analysis
+ * java -jar target/*-benchmarks.jar -rf json -rff results.json
+ *
+ * # List all available benchmarks
+ * java -jar target/*-benchmarks.jar -l
+ *
+ * # Profile with async-profiler
+ * java -jar target/*-benchmarks.jar -prof async:output=flamegraph
+ * }
+ * + *

Available Benchmark Categories

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
CategoryBenchmarksFocus
CoreSingleFixBenchmark, MultiFixChainBenchmark, SchemaLookupBenchmarkDataFixer migration performance
FormatJsonBenchmark, YamlBenchmark, TomlXmlBenchmark, CrossFormatBenchmarkDynamicOps format comparisons
CodecPrimitiveCodecBenchmark, CollectionCodecBenchmarkSerialization/deserialization
ConcurrentConcurrentMigrationBenchmarkThread-safety and scalability
+ * + *

Programmatic API

+ *

For integration with test frameworks or custom tooling:

+ *
{@code
+ * // Run all benchmarks
+ * BenchmarkRunner.runAllBenchmarks();
+ *
+ * // Run quick validation (CI/CD)
+ * BenchmarkRunner.runQuickBenchmarks();
+ *
+ * // Run only core benchmarks
+ * BenchmarkRunner.runCoreBenchmarks();
+ *
+ * // Run only format benchmarks
+ * BenchmarkRunner.runFormatBenchmarks();
+ * }
+ * + *

Default Configuration

+ * + * + * + * + * + * + *
SettingDefaultQuick Mode
Warmup iterations52
Measurement iterations103
Forks21
JVM heap2 GB1 GB
+ * + *

Common JMH Options

+ * + * + * + * + * + * + * + * + * + * + * + *
OptionDescriptionExample
{@code -wi}Warmup iterations{@code -wi 3}
{@code -i}Measurement iterations{@code -i 5}
{@code -f}Number of forks{@code -f 1}
{@code -p}Parameter value{@code -p payloadSize=SMALL}
{@code -t}Thread count{@code -t 4}
{@code -rf}Result format{@code -rf json}
{@code -rff}Result file{@code -rff results.json}
{@code -l}List benchmarks{@code -l}
{@code -prof}Profiler{@code -prof gc}
+ * + * @author Erik Pförtner + * @see de.splatgames.aether.datafixers.benchmarks.core + * @see de.splatgames.aether.datafixers.benchmarks.codec + * @see de.splatgames.aether.datafixers.benchmarks.concurrent + * @since 1.0.0 + */ +public final class BenchmarkRunner { + + /** + * Private constructor to prevent instantiation. + */ + private BenchmarkRunner() { + // Main class + } + + /** + * Main entry point for running benchmarks from the command line. + * + *

Behavior depends on whether arguments are provided:

+ * + * + *

Exit Codes

+ * + * + * @param args command-line arguments (passed directly to JMH if present) + * @throws RunnerException if benchmark execution fails + * @throws IOException if there is an I/O error reading benchmark metadata + */ + public static void main(final String[] args) throws RunnerException, IOException { + if (args.length > 0) { + // If arguments are provided, delegate to JMH main + org.openjdk.jmh.Main.main(args); + } else { + // Run with default options + runAllBenchmarks(); + } + } + + /** + * Runs all benchmarks in the benchmarks package with default configuration. + * + *

Executes every benchmark class in + * {@code de.splatgames.aether.datafixers.benchmarks.*} with production-quality + * settings suitable for reliable performance measurements.

+ * + *

Configuration

+ * + * + *

Note: Running all benchmarks can take significant time depending + * on the number of parameter combinations. Consider using + * {@link #runQuickBenchmarks()} for validation or {@link #runCoreBenchmarks()} + * for focused testing.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runQuickBenchmarks() + * @see #runCoreBenchmarks() + */ + public static void runAllBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs a quick subset of benchmarks for fast validation. + * + *

Executes only the {@code SingleFixBenchmark} with minimal iterations, + * suitable for:

+ * + * + *

Configuration

+ * + * + *

Warning: Results from quick benchmarks should not be used for + * performance comparisons due to reduced statistical rigor.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runAllBenchmarks() + */ + public static void runQuickBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\.SingleFixBenchmark") + .warmupIterations(2) + .measurementIterations(3) + .forks(1) + .jvmArgs("-Xms1G", "-Xmx1G") + .param("payloadSize", "SMALL") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the core migration benchmarks. + * + *

Executes benchmarks in the {@code core} package that measure DataFixer + * migration performance:

+ * + * + *

Configuration

+ * + * + *

Use this method when focusing on migration performance without + * format-specific or codec overhead considerations.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runFormatBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runCoreBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the format comparison benchmarks. + * + *

Executes benchmarks in the {@code format} package that compare different + * DynamicOps implementations:

+ * + * + *

Configuration

+ * + * + *

Use this method when evaluating which DynamicOps implementation + * to use for a specific use case, or when optimizing format handling.

+ * + * @throws RunnerException if benchmark execution fails + * @see #runCoreBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runFormatBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.format\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java new file mode 100644 index 0000000..56405aa --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.codec; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.codec.Codec; +import de.splatgames.aether.datafixers.api.codec.Codecs; +import de.splatgames.aether.datafixers.api.result.DataResult; +import de.splatgames.aether.datafixers.api.util.Pair; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for collection codec encode/decode performance. + * + *

Measures the performance of list codec operations with parameterized collection + * sizes. These benchmarks reveal how codec performance scales with data volume and + * help identify potential bottlenecks in collection traversal and element processing.

+ * + *

Benchmark Categories

+ * + *

String List Benchmarks

+ *

Measure {@code List} codec operations:

+ * + * + *

Integer List Benchmarks

+ *

Measure {@code List} codec operations:

+ * + * + *

Parameters

+ * + * + * + *
ParameterValuesDescription
listSize10, 100, 1000Number of elements in the test list
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2 (for JIT variance mitigation)
JVM Heap2 GB min/max
Time UnitMicroseconds (appropriate for collection operations)
+ * + *

Test Data Generation

+ * + * + * + * + *
CollectionElement PatternExample (size=3)
String List{@code "item-" + index}["item-0", "item-1", "item-2"]
Integer List{@code index}[0, 1, 2]
+ * + *

Interpreting Results

+ * + * + *

Usage

+ *
{@code
+ * # Run all collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run with specific list size
+ * java -jar benchmarks.jar CollectionCodecBenchmark -p listSize=1000
+ *
+ * # Run only string list benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.*String.*"
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.encode.*"
+ *
+ * # Compare direct vs functional round-trip
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.roundTrip.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar CollectionCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar CollectionCodecBenchmark -rf json -rff collection_results.json
+ * }
+ * + * @author Erik Pförtner + * @see PrimitiveCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codecs#list(Codec) + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class CollectionCodecBenchmark { + + /** + * The number of elements in test lists, injected by JMH. + * + *

This parameter controls the size of both string and integer lists. + * Different sizes reveal scaling characteristics of the list codec:

+ * + */ + @Param({"10", "100", "1000"}) + private int listSize; + + /** + * The DynamicOps implementation used for all codec operations. + * + *

GsonOps is used as the reference JSON implementation for benchmarks.

+ */ + private GsonOps ops; + + /** + * Codec for encoding/decoding {@code List}. + * + *

Created via {@link Codecs#list(Codec)} wrapping {@link Codecs#STRING}.

+ */ + private Codec> stringListCodec; + + /** + * Codec for encoding/decoding {@code List}. + * + *

Created via {@link Codecs#list(Codec)} wrapping {@link Codecs#INT}.

+ */ + private Codec> intListCodec; + + /** + * Test string list populated with {@link #listSize} elements. + * + *

Elements follow the pattern "item-0", "item-1", ..., "item-(n-1)".

+ */ + private List stringList; + + /** + * Test integer list populated with {@link #listSize} elements. + * + *

Elements are sequential integers: 0, 1, 2, ..., (n-1).

+ */ + private List intList; + + /** + * Pre-encoded JSON array for string list decode benchmarks. + * + *

Created during setup to isolate decode performance from encoding overhead.

+ */ + private JsonElement encodedStringList; + + /** + * Pre-encoded JSON array for integer list decode benchmarks. + * + *

Created during setup to isolate decode performance from encoding overhead.

+ */ + private JsonElement encodedIntList; + + /** + * Initializes codecs, test data, and pre-encoded JSON elements. + * + *

This setup method:

+ *
    + *
  1. Creates list codecs by composing primitive codecs with {@link Codecs#list(Codec)}
  2. + *
  3. Populates test lists with {@link #listSize} elements each
  4. + *
  5. Pre-encodes both lists to JSON for decode benchmark isolation
  6. + *
+ * + *

Using {@link ArrayList} with pre-sized capacity avoids resizing overhead + * during population.

+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.stringListCodec = Codecs.list(Codecs.STRING); + this.intListCodec = Codecs.list(Codecs.INT); + + this.stringList = new ArrayList<>(this.listSize); + this.intList = new ArrayList<>(this.listSize); + + for (int i = 0; i < this.listSize; i++) { + this.stringList.add("item-" + i); + this.intList.add(i); + } + + this.encodedStringList = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + this.encodedIntList = this.intListCodec.encodeStart(this.ops, this.intList) + .result().orElseThrow(); + } + + // ==================== String List Benchmarks ==================== + + /** + * Benchmarks string list encoding to JSON array. + * + *

Measures the performance of converting a {@code List} to a JSON + * array element. Each string element is individually encoded and added to the + * resulting array.

+ * + *

Performance factors:

+ *
    + *
  • List iteration overhead
  • + *
  • Per-element string encoding cost
  • + *
  • JSON array construction and element addition
  • + *
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeStringList(final Blackhole blackhole) { + final DataResult result = this.stringListCodec.encodeStart(this.ops, this.stringList); + blackhole.consume(result); + } + + /** + * Benchmarks string list decoding from JSON array. + * + *

Measures the performance of extracting a {@code List} from a + * pre-encoded JSON array. Each array element is decoded to a string and + * collected into the result list.

+ * + *

Performance factors:

+ *
    + *
  • JSON array traversal
  • + *
  • Per-element string extraction
  • + *
  • Result list construction and population
  • + *
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeStringList(final Blackhole blackhole) { + final DataResult, JsonElement>> result = this.stringListCodec.decode(this.ops, this.encodedStringList); + blackhole.consume(result); + } + + // ==================== Integer List Benchmarks ==================== + + /** + * Benchmarks integer list encoding to JSON array. + * + *

Measures the performance of converting a {@code List} to a JSON + * array element. Integer encoding is typically faster than string encoding + * due to simpler value representation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeIntList(final Blackhole blackhole) { + final DataResult result = this.intListCodec.encodeStart(this.ops, this.intList); + blackhole.consume(result); + } + + /** + * Benchmarks integer list decoding from JSON array. + * + *

Measures the performance of extracting a {@code List} from a + * pre-encoded JSON array. Integer decoding involves numeric parsing from + * JSON number elements.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeIntList(final Blackhole blackhole) { + final DataResult, JsonElement>> result = this.intListCodec.decode(this.ops, this.encodedIntList); + blackhole.consume(result); + } + + // ==================== Round-Trip Benchmarks (Direct Style) ==================== + + /** + * Benchmarks complete string list round-trip with direct result extraction. + * + *

Measures the combined performance of encoding a {@code List} to JSON + * and immediately decoding it back. Uses {@code result().orElseThrow()} for + * direct value extraction, representing typical imperative usage patterns.

+ * + *

This benchmark is useful for scenarios where data is temporarily serialized + * (e.g., caching, message passing) and immediately deserialized.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListDirect(final Blackhole blackhole) { + final JsonElement json = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + final Pair, JsonElement> decoded = this.stringListCodec.decode(this.ops, json) + .result().orElseThrow(); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete integer list round-trip with direct result extraction. + * + *

Measures the combined performance of encoding a {@code List} to JSON + * and immediately decoding it back using direct value extraction.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntListDirect(final Blackhole blackhole) { + final JsonElement json = this.intListCodec.encodeStart(this.ops, this.intList) + .result().orElseThrow(); + final Pair, JsonElement> decoded = this.intListCodec.decode(this.ops, json) + .result().orElseThrow(); + blackhole.consume(decoded); + } + + // ==================== Round-Trip Benchmarks (Functional Style) ==================== + + /** + * Benchmarks complete string list round-trip using functional API. + * + *

Measures the combined performance of encoding and decoding using + * {@link DataResult#flatMap} for monadic composition. This represents + * the functional programming style where operations are chained without + * explicit result unwrapping.

+ * + *

Comparing with {@link #roundTripStringListDirect} reveals the overhead + * (if any) of the functional API approach versus direct extraction.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListFunctional(final Blackhole blackhole) { + final DataResult encoded = this.stringListCodec.encodeStart(this.ops, this.stringList); + final DataResult, JsonElement>> decoded = encoded.flatMap( + json -> this.stringListCodec.decode(this.ops, json) + ); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete integer list round-trip using functional API. + * + *

Measures the combined performance of encoding and decoding using + * monadic composition via {@link DataResult#flatMap}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntListFunctional(final Blackhole blackhole) { + final DataResult encoded = this.intListCodec.encodeStart(this.ops, this.intList); + final DataResult, JsonElement>> decoded = encoded.flatMap( + json -> this.intListCodec.decode(this.ops, json) + ); + blackhole.consume(decoded); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java new file mode 100644 index 0000000..7e9c8da --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.codec; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.codec.Codecs; +import de.splatgames.aether.datafixers.api.result.DataResult; +import de.splatgames.aether.datafixers.api.util.Pair; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for primitive type codec encode/decode performance. + * + *

Measures the baseline performance of the fundamental codec operations for + * primitive Java types. These benchmarks establish the lower bound for codec + * performance and help identify overhead introduced by more complex codec + * compositions.

+ * + *

Benchmark Categories

+ * + *

Encode Benchmarks

+ *

Measure Java value to JSON element conversion:

+ *
    + *
  • {@link #encodeBool} - Boolean encoding
  • + *
  • {@link #encodeInt} - Integer encoding
  • + *
  • {@link #encodeLong} - Long encoding
  • + *
  • {@link #encodeFloat} - Float encoding
  • + *
  • {@link #encodeDouble} - Double encoding
  • + *
  • {@link #encodeString} - String encoding
  • + *
+ * + *

Decode Benchmarks

+ *

Measure JSON element to Java value conversion:

+ *
    + *
  • {@link #decodeBool} - Boolean decoding
  • + *
  • {@link #decodeInt} - Integer decoding
  • + *
  • {@link #decodeLong} - Long decoding
  • + *
  • {@link #decodeFloat} - Float decoding
  • + *
  • {@link #decodeDouble} - Double decoding
  • + *
  • {@link #decodeString} - String decoding
  • + *
+ * + *

Round-Trip Benchmarks

+ *

Measure complete encode-then-decode cycles:

+ *
    + *
  • {@link #roundTripIntDirect} - Integer round-trip with direct result extraction
  • + *
  • {@link #roundTripStringDirect} - String round-trip with direct result extraction
  • + *
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2 (for JIT variance mitigation)
JVM Heap2 GB min/max
Time UnitNanoseconds (for fine-grained primitive ops)
+ * + *

Test Values

+ * + * + * + * + * + * + * + * + *
TypeValueNotes
boolean{@code true}Single bit representation
int{@code 42}Small positive integer
long{@code 123456789L}Value exceeding int range representation
float{@code 3.14159f}Pi approximation (tests decimal handling)
double{@code 2.718281828}Euler's number (tests precision)
String{@code "benchmark-test-string"}21-character ASCII string
+ * + *

Interpreting Results

+ *
    + *
  • Encode vs Decode: Encoding typically allocates new JSON elements; decoding + * extracts values from existing elements. Similar performance is expected.
  • + *
  • Numeric types: All numeric types should have similar performance as they + * map directly to JSON number primitives.
  • + *
  • String codec: May show slightly different characteristics due to string + * interning and character encoding considerations.
  • + *
  • Round-trip overhead: Should be approximately encode + decode time plus + * minimal DataResult unwrapping overhead.
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run all primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.encode.*"
+ *
+ * # Run only decode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.decode.*"
+ *
+ * # Compare specific types
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.*(Int|Long).*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV for spreadsheet analysis
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -rf csv -rff primitive_results.csv
+ * }
+ * + * @author Erik Pförtner + * @see CollectionCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codecs + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class PrimitiveCodecBenchmark { + + /** + * Test boolean value for encoding benchmarks. + */ + private static final boolean TEST_BOOL = true; + + /** + * Test integer value for encoding benchmarks. + * + *

A small positive integer that fits in a single JSON number token.

+ */ + private static final int TEST_INT = 42; + + /** + * Test long value for encoding benchmarks. + * + *

A value that exceeds typical int range to test long-specific handling.

+ */ + private static final long TEST_LONG = 123456789L; + + /** + * Test float value for encoding benchmarks. + * + *

Pi approximation to test decimal point handling and precision.

+ */ + private static final float TEST_FLOAT = 3.14159f; + + /** + * Test double value for encoding benchmarks. + * + *

Euler's number with extended precision to test double encoding accuracy.

+ */ + private static final double TEST_DOUBLE = 2.718281828; + + /** + * Test string value for encoding benchmarks. + * + *

A 21-character ASCII string representing typical field values.

+ */ + private static final String TEST_STRING = "benchmark-test-string"; + + /** + * The DynamicOps implementation used for all codec operations. + * + *

GsonOps is used as the reference implementation for JSON format benchmarks.

+ */ + private GsonOps ops; + + /** + * Pre-encoded boolean JSON element for decode benchmarks. + */ + private JsonElement encodedBool; + + /** + * Pre-encoded integer JSON element for decode benchmarks. + */ + private JsonElement encodedInt; + + /** + * Pre-encoded long JSON element for decode benchmarks. + */ + private JsonElement encodedLong; + + /** + * Pre-encoded float JSON element for decode benchmarks. + */ + private JsonElement encodedFloat; + + /** + * Pre-encoded double JSON element for decode benchmarks. + */ + private JsonElement encodedDouble; + + /** + * Pre-encoded string JSON element for decode benchmarks. + */ + private JsonElement encodedString; + + /** + * Initializes pre-encoded JSON elements for decode benchmarks. + * + *

Pre-encoding ensures decode benchmarks measure only decoding performance + * without encoding overhead. All test values are encoded once at trial start.

+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.encodedBool = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL).result().orElseThrow(); + this.encodedInt = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + this.encodedLong = Codecs.LONG.encodeStart(this.ops, TEST_LONG).result().orElseThrow(); + this.encodedFloat = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT).result().orElseThrow(); + this.encodedDouble = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE).result().orElseThrow(); + this.encodedString = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + } + + // ==================== Boolean Benchmarks ==================== + + /** + * Benchmarks boolean value encoding to JSON. + * + *

Measures the performance of converting a Java {@code boolean} to a + * JSON boolean element via {@link Codecs#BOOL}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeBool(final Blackhole blackhole) { + final DataResult result = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL); + blackhole.consume(result); + } + + /** + * Benchmarks boolean value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Boolean} from a + * pre-encoded JSON boolean element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeBool(final Blackhole blackhole) { + final DataResult> result = Codecs.BOOL.decode(this.ops, this.encodedBool); + blackhole.consume(result); + } + + // ==================== Integer Benchmarks ==================== + + /** + * Benchmarks integer value encoding to JSON. + * + *

Measures the performance of converting a Java {@code int} to a + * JSON number element via {@link Codecs#INT}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeInt(final Blackhole blackhole) { + final DataResult result = Codecs.INT.encodeStart(this.ops, TEST_INT); + blackhole.consume(result); + } + + /** + * Benchmarks integer value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Integer} from a + * pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeInt(final Blackhole blackhole) { + final DataResult> result = Codecs.INT.decode(this.ops, this.encodedInt); + blackhole.consume(result); + } + + // ==================== Long Benchmarks ==================== + + /** + * Benchmarks long value encoding to JSON. + * + *

Measures the performance of converting a Java {@code long} to a + * JSON number element via {@link Codecs#LONG}.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeLong(final Blackhole blackhole) { + final DataResult result = Codecs.LONG.encodeStart(this.ops, TEST_LONG); + blackhole.consume(result); + } + + /** + * Benchmarks long value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Long} from a + * pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeLong(final Blackhole blackhole) { + final DataResult> result = Codecs.LONG.decode(this.ops, this.encodedLong); + blackhole.consume(result); + } + + // ==================== Float Benchmarks ==================== + + /** + * Benchmarks float value encoding to JSON. + * + *

Measures the performance of converting a Java {@code float} to a + * JSON number element via {@link Codecs#FLOAT}. Float encoding involves + * decimal representation which may differ from integer encoding.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeFloat(final Blackhole blackhole) { + final DataResult result = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT); + blackhole.consume(result); + } + + /** + * Benchmarks float value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Float} from a + * pre-encoded JSON number element. Decoding involves parsing the decimal + * representation back to IEEE 754 single-precision format.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeFloat(final Blackhole blackhole) { + final DataResult> result = Codecs.FLOAT.decode(this.ops, this.encodedFloat); + blackhole.consume(result); + } + + // ==================== Double Benchmarks ==================== + + /** + * Benchmarks double value encoding to JSON. + * + *

Measures the performance of converting a Java {@code double} to a + * JSON number element via {@link Codecs#DOUBLE}. Double encoding preserves + * higher precision than float but uses similar mechanisms.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeDouble(final Blackhole blackhole) { + final DataResult result = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE); + blackhole.consume(result); + } + + /** + * Benchmarks double value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code Double} from a + * pre-encoded JSON number element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeDouble(final Blackhole blackhole) { + final DataResult> result = Codecs.DOUBLE.decode(this.ops, this.encodedDouble); + blackhole.consume(result); + } + + // ==================== String Benchmarks ==================== + + /** + * Benchmarks string value encoding to JSON. + * + *

Measures the performance of converting a Java {@code String} to a + * JSON string element via {@link Codecs#STRING}. String encoding may involve + * escape sequence handling for special characters.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeString(final Blackhole blackhole) { + final DataResult result = Codecs.STRING.encodeStart(this.ops, TEST_STRING); + blackhole.consume(result); + } + + /** + * Benchmarks string value decoding from JSON. + * + *

Measures the performance of extracting a Java {@code String} from a + * pre-encoded JSON string element.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeString(final Blackhole blackhole) { + final DataResult> result = Codecs.STRING.decode(this.ops, this.encodedString); + blackhole.consume(result); + } + + // ==================== Round-Trip Benchmarks ==================== + + /** + * Benchmarks complete integer round-trip (encode then decode). + * + *

Measures the combined performance of encoding a Java {@code int} to JSON + * and immediately decoding it back. Uses direct result extraction via + * {@code result().orElseThrow()} to measure the common direct-extraction usage pattern (as opposed to functional composition via {@code flatMap}).

+ * + *

Round-trip performance is important for scenarios where data is temporarily + * serialized (e.g., caching, IPC) and immediately deserialized.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + final Pair decoded = Codecs.INT.decode(this.ops, json).result().orElseThrow(); + blackhole.consume(decoded); + } + + /** + * Benchmarks complete string round-trip (encode then decode). + * + *

Measures the combined performance of encoding a Java {@code String} to JSON + * and immediately decoding it back. String round-trips may involve additional + * overhead from string object creation compared to primitive numeric types.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + final Pair decoded = Codecs.STRING.decode(this.ops, json).result().orElseThrow(); + blackhole.consume(decoded); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java new file mode 100644 index 0000000..5720cfc --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/** + * Codec-focused JMH benchmarks for the Aether DataFixers framework. + * + *

This package contains benchmarks that measure the performance of codec operations, + * including encoding (Java objects to serialized format) and decoding (serialized format + * to Java objects). These benchmarks establish baseline performance for the codec system + * and help identify bottlenecks in serialization pipelines.

+ * + *

Benchmark Classes

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ClassFocus AreaKey Metrics
{@link de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark}Primitive type codecs (bool, int, long, float, double, string)Baseline encode/decode latency, round-trip overhead
{@link de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark}Collection codecs (List<String>, List<Integer>)Scaling with collection size, functional vs direct API overhead
+ * + *

Why Codec Benchmarks?

+ *

Codecs are fundamental to the DataFixer system, transforming data between typed + * Java objects and format-agnostic {@link de.splatgames.aether.datafixers.api.dynamic.Dynamic} + * representations. Understanding codec performance is essential for:

+ *
    + *
  • Baseline establishment: Primitive codecs set the lower bound for all + * codec operations; complex codecs compose these primitives
  • + *
  • Bottleneck identification: Comparing encode vs decode reveals which + * direction is more expensive for a given type
  • + *
  • Scaling analysis: Collection benchmarks show how performance changes + * with data volume
  • + *
  • API comparison: Direct extraction vs functional composition may have + * different performance characteristics
  • + *
+ * + *

Running Codec Benchmarks

+ *
{@code
+ * # Run all codec benchmarks
+ * java -jar benchmarks.jar ".*codec.*"
+ *
+ * # Run only primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run encode-only benchmarks across all codec types
+ * java -jar benchmarks.jar ".*codec.*encode.*"
+ *
+ * # Run decode-only benchmarks
+ * java -jar benchmarks.jar ".*codec.*decode.*"
+ *
+ * # Run round-trip benchmarks
+ * java -jar benchmarks.jar ".*codec.*roundTrip.*"
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*codec.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV report for analysis
+ * java -jar benchmarks.jar ".*codec.*" -rf csv -rff codec_results.csv
+ * }
+ * + *

Benchmark Design Principles

+ *
    + *
  • Isolated operations: Encode and decode are benchmarked separately to + * identify which direction is more expensive
  • + *
  • Pre-encoded data: Decode benchmarks use pre-encoded JSON elements + * created during setup to avoid measuring encoding overhead
  • + *
  • Parameterization: Collection sizes are parameterized to reveal + * scaling characteristics
  • + *
  • API styles: Both direct extraction ({@code result().orElseThrow()}) + * and functional composition ({@code flatMap}) are benchmarked for round-trips
  • + *
  • Time units: Nanoseconds for primitives (sub-microsecond operations), + * microseconds for collections (longer operations)
  • + *
+ * + *

Interpreting Codec Results

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ObservationMeaningAction
Encode slower than decodeJSON element construction more expensive than extractionConsider caching encoded results if reused
Decode slower than encodeType parsing/validation overhead dominatesReview type conversion logic
Super-linear collection scalingGC pressure or algorithmic inefficiencyProfile memory allocation; consider streaming
Functional API slower than directLambda/closure overhead measurableUse direct extraction for hot paths
String codec slower than numericString allocation/interning overheadExpected; no action needed
+ * + *

Relationship to Other Benchmarks

+ *

Codec benchmarks complement other benchmark packages:

+ *
    + *
  • {@link de.splatgames.aether.datafixers.benchmarks.core core} - Uses codecs + * internally; codec performance affects fix application time
  • + *
  • {@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} - + * Codec thread-safety is assumed; concurrent benchmarks validate this assumption
  • + *
+ * + *

Supported Serialization Formats

+ *

These benchmarks use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} + * as the reference DynamicOps implementation. The codec system supports multiple formats:

+ *
    + *
  • JSON: GsonOps, JacksonJsonOps
  • + *
  • YAML: SnakeYamlOps, JacksonYamlOps
  • + *
  • TOML: JacksonTomlOps
  • + *
  • XML: JacksonXmlOps
  • + *
+ *

Future benchmarks may compare performance across different DynamicOps implementations.

+ * + * @see de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codec + * @see de.splatgames.aether.datafixers.api.codec.Codecs + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.codec; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java new file mode 100644 index 0000000..a1830bf --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.concurrent; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.api.schema.Schema; +import de.splatgames.aether.datafixers.api.schema.SchemaRegistry; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry; +import de.splatgames.aether.datafixers.testkit.factory.MockSchemas; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for concurrent DataFixer operations and thread-safety validation. + * + *

This benchmark measures the performance characteristics of the DataFixer system + * under concurrent load. It validates thread-safety of shared components and quantifies + * scalability across different thread counts. The results help identify contention + * points and ensure the framework performs well in multi-threaded environments.

+ * + *

Benchmark Categories

+ * + *

Concurrent Migration Benchmarks

+ *

Measure DataFixer performance when multiple threads perform migrations simultaneously:

+ *
    + *
  • {@link #concurrentSingleFix} - Maximum parallelism with single-fix migrations
  • + *
  • {@link #concurrentChainMigration} - Maximum parallelism with 10-fix chain migrations
  • + *
  • {@link #fourThreadMigration} - Fixed 4-thread migration for baseline comparison
  • + *
  • {@link #eightThreadMigration} - Fixed 8-thread migration for scaling analysis
  • + *
+ * + *

Concurrent Registry Access Benchmarks

+ *

Measure SchemaRegistry performance under concurrent read pressure:

+ *
    + *
  • {@link #concurrentRegistryLookup} - Random version lookups from multiple threads
  • + *
  • {@link #concurrentLatestLookup} - Latest schema lookups (hot path optimization)
  • + *
+ * + *

Thread Configuration

+ * + * + * + * + * + * + * + * + *
BenchmarkThreadsPurpose
concurrentSingleFixMAX (all available)Maximum contention stress test
concurrentChainMigrationMAXChain migration under full load
fourThreadMigration4Typical server scenario baseline
eightThreadMigration8Higher parallelism scaling point
concurrentRegistryLookupMAXRegistry contention stress test
concurrentLatestLookupMAXHot path contention analysis
+ * + *

Parameters

+ * + * + * + *
ParameterValuesDescription
payloadSizeSMALL, MEDIUMInput data complexity per thread
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup3 iterations, 2 seconds each
Measurement5 iterations, 2 seconds each
Forks2 (for JIT variance mitigation)
JVM Heap2 GB min/max
Time UnitMicroseconds
+ * + *

State Management

+ *

This benchmark uses two JMH state classes to properly isolate shared and + * thread-local data:

+ *
    + *
  • {@link BenchmarkState} (Scope.Benchmark) - Shared across all threads: DataFixer + * instances, SchemaRegistry, and version constants
  • + *
  • {@link ThreadState} (Scope.Thread) - Per-thread isolation: input data, RNG, + * and pre-computed random indices to avoid contention
  • + *
+ * + *

Interpreting Results

+ *
    + *
  • Linear throughput scaling: Ideal - throughput increases proportionally with thread count
  • + *
  • Sub-linear scaling: Expected due to shared resource contention (cache lines, locks)
  • + *
  • Throughput plateau: Indicates saturation point; adding threads provides no benefit
  • + *
  • Throughput degradation: Severe contention; may indicate lock contention or false sharing
  • + *
  • High variance (large ± error in reported results): May indicate GC pauses, lock contention, or scheduler interference
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run all concurrent benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count override
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -t 16
+ *
+ * # Run registry-only benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*Lookup.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -rf json -rff concurrent_results.json
+ *
+ * # Profile with async-profiler integration
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -prof async:output=flamegraph
+ * }
+ * + * @author Erik Pförtner + * @see de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark + * @see BenchmarkBootstrap + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class ConcurrentMigrationBenchmark { + + // ==================== Concurrent Migration Benchmarks ==================== + + /** + * Benchmarks concurrent single-fix migrations with maximum thread parallelism. + * + *

All available CPU threads simultaneously apply a single DataFix to their + * respective input data. This benchmark stress-tests the thread-safety of the + * DataFixer implementation and measures maximum achievable throughput.

+ * + *

Key aspects measured:

+ *
    + *
  • Lock contention in shared DataFixer instance
  • + *
  • Memory allocation pressure under concurrent load
  • + *
  • Cache coherency effects from shared schema access
  • + *
+ * + * @param s shared benchmark state containing the DataFixer and versions + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentSingleFix(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks concurrent chain migrations with maximum thread parallelism. + * + *

All available CPU threads simultaneously apply a 10-fix chain migration. + * This benchmark combines the stress of concurrent access with the complexity + * of multi-step migrations, revealing performance characteristics under + * realistic high-load scenarios.

+ * + *

Compared to {@link #concurrentSingleFix}, this benchmark:

+ *
    + *
  • Increases per-operation duration, potentially reducing contention
  • + *
  • Exercises fix ordering and version traversal logic concurrently
  • + *
  • Creates higher memory allocation rates per thread
  • + *
+ * + * @param s shared benchmark state containing the chain DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentChainMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedChainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.chainToVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks migration performance with exactly 4 concurrent threads. + * + *

Provides a fixed-thread baseline for comparing against variable-thread + * benchmarks. Four threads represent a typical server core count and help + * establish scaling characteristics between single-threaded and maximum + * parallelism scenarios.

+ * + *

Use this benchmark to:

+ *
    + *
  • Establish baseline concurrent performance on quad-core systems
  • + *
  • Compare with {@link #eightThreadMigration} to measure scaling factor
  • + *
  • Identify the point where adding threads provides diminishing returns
  • + *
+ * + * @param s shared benchmark state containing the DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(4) + public void fourThreadMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks migration performance with exactly 8 concurrent threads. + * + *

Tests scaling beyond the 4-thread baseline. Eight threads represent + * a common server configuration and help identify whether the DataFixer + * implementation scales efficiently with additional parallelism.

+ * + *

Scaling analysis:

+ *
    + *
  • 2x throughput vs 4 threads: Perfect linear scaling
  • + *
  • 1.5-2x throughput: Good scaling with minor contention
  • + *
  • <1.5x throughput: Contention limiting scalability
  • + *
  • ≤1x throughput: Severe contention; investigate locking
  • + *
+ * + * @param s shared benchmark state containing the DataFixer + * @param t per-thread state containing isolated input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(8) + public void eightThreadMigration(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final Dynamic result = s.sharedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + t.threadInput, + s.fromVersion, + s.toVersion + ); + blackhole.consume(result); + } + + // ==================== Concurrent Registry Access Benchmarks ==================== + + /** + * Benchmarks concurrent random schema lookups from the registry. + * + *

All available threads perform random version lookups against a shared + * {@link SchemaRegistry} containing 100 schema versions. This benchmark + * validates the thread-safety and performance of registry read operations + * under heavy concurrent access.

+ * + *

The benchmark uses pre-computed random indices (via {@link ThreadState#nextRegistryIndex()}) + * to avoid RNG contention affecting measurements. Each thread cycles through + * a 1024-element buffer of random indices.

+ * + *

Performance expectations:

+ *
    + *
  • Registry lookups should be lock-free and scale linearly
  • + *
  • Cache effects may cause variance based on version access patterns
  • + *
  • No write contention since registry is frozen before benchmarking
  • + *
+ * + * @param s shared benchmark state containing the registry and versions + * @param t per-thread state providing random index sequence + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentRegistryLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextRegistryIndex(); + final Schema schema = s.sharedRegistry.get(s.registryVersions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks concurrent latest-schema lookups from the registry. + * + *

All available threads repeatedly call {@link SchemaRegistry#latest()} + * on a shared registry. This represents the "hot path" optimization where + * applications frequently need the most recent schema version.

+ * + *

This benchmark helps validate:

+ *
    + *
  • Caching effectiveness for the latest schema reference
  • + *
  • Memory visibility of the cached latest schema across threads
  • + *
  • Absence of unnecessary synchronization on read-only access
  • + *
+ * + *

Expected to outperform {@link #concurrentRegistryLookup} due to:

+ *
    + *
  • No version-to-schema map lookup required
  • + *
  • Single cached reference rather than computed lookup
  • + *
  • Better CPU cache utilization from accessing same memory location
  • + *
+ * + * @param s shared benchmark state containing the registry + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + @Threads(Threads.MAX) + public void concurrentLatestLookup(final BenchmarkState s, + final Blackhole blackhole) { + final Schema schema = s.sharedRegistry.latest(); + blackhole.consume(schema); + } + + // ==================== State Classes ==================== + + /** + * Shared benchmark state accessible by all threads. + * + *

This state class contains all resources that are shared across benchmark + * threads, simulating real-world scenarios where a single DataFixer instance + * serves multiple concurrent requests.

+ * + *

State initialization occurs once per trial (before warmup begins) to + * ensure consistent starting conditions across all measurement iterations.

+ * + *

Shared Resources

+ *
    + *
+ *   <li>{@link #sharedFixer} - Single-fix DataFixer for basic migration benchmarks</li>
+ *   <li>{@link #sharedChainFixer} - 10-fix chain DataFixer for chain migration benchmarks</li>
+ *   <li>{@link #sharedRegistry} - Frozen SchemaRegistry with 100 versions for lookup benchmarks</li>
+ *   <li>Version constants - Pre-computed DataVersion instances to avoid allocation during measurement</li>
+ */ + @State(Scope.Benchmark) + public static class BenchmarkState { + + /** + * The payload size parameter, injected by JMH. + * + *

Controls the complexity of generated test data for each thread. + * Only SMALL and MEDIUM sizes are used to balance benchmark runtime + * with meaningful performance differentiation.

+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + public PayloadSize payloadSize; + + /** + * Shared DataFixer configured with a single fix (v1 → v2). + * + *

Used by migration benchmarks that measure basic concurrent + * fix application without chain traversal overhead.

+ */ + public DataFixer sharedFixer; + + /** + * Shared DataFixer configured with a 10-fix chain (v1 → v11). + * + *

Used by {@link #concurrentChainMigration} to measure concurrent + * performance when applying multiple sequential fixes.

+ */ + public DataFixer sharedChainFixer; + + /** + * Shared SchemaRegistry containing 100 schema versions. + * + *

The registry is frozen after population to ensure thread-safe + * read access during benchmarks. Versions range from 10 to 1000 + * in increments of 10.

+ */ + public SchemaRegistry sharedRegistry; + + /** + * Source version for all migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for single-fix migrations (v2). + */ + public DataVersion toVersion; + + /** + * Target version for chain migrations (v11). + */ + public DataVersion chainToVersion; + + /** + * Pre-computed DataVersion array for registry lookup benchmarks. + * + *

Contains 100 versions (10, 20, 30, ..., 1000) matching the + * schemas registered in {@link #sharedRegistry}. Pre-allocation + * avoids DataVersion object creation during measurement.

+ */ + public DataVersion[] registryVersions; + + /** + * Initializes all shared benchmark state. + * + *

Creates DataFixer instances, populates the SchemaRegistry with + * 100 versions, and pre-computes all version constants. The registry + * is frozen after population to enable lock-free concurrent reads.

+ */ + @Setup(Level.Trial) + public void setup() { + this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer(); + this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10); + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + this.chainToVersion = new DataVersion(11); + + final SimpleSchemaRegistry registry = new SimpleSchemaRegistry(); + this.registryVersions = new DataVersion[100]; + for (int i = 0; i < 100; i++) { + final int version = (i + 1) * 10; + this.registryVersions[i] = new DataVersion(version); + registry.register(MockSchemas.minimal(version)); + } + registry.freeze(); + this.sharedRegistry = registry; + } + } + + /** + * Per-thread benchmark state for isolated data and random access patterns. + * + *

This state class provides each benchmark thread with its own input data + * and random number generator to eliminate false sharing and contention on + * thread-local operations.

+ * + *

Design Rationale

+ *
    + *
+ *   <li>Thread-local input: Each thread operates on its own Dynamic instance,
+ *   preventing write contention and ensuring independent GC behavior</li>
+ *   <li>SplittableRandom: Faster and contention-free compared to
+ *   {@link java.util.Random} which uses atomic CAS operations</li>
+ *   <li>Pre-computed indices: Random registry indices are generated during
+ *   setup to avoid RNG overhead during measurement</li>
+ * + *

Index Buffer Strategy

+ *

The {@link #registryIndexBuffer} uses a power-of-two size (1024) with + * bitwise AND masking for efficient wraparound without modulo operations. + * This provides pseudo-random access patterns while minimizing measurement + * overhead.

+ */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-computed random index buffer. + * + *

Power of two (1024) enables efficient wraparound via bitwise AND. + * Large enough to avoid pattern repetition affecting cache behavior + * during typical measurement windows.

+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bitmask for efficient modulo operation on buffer index. + * + *

Used as {@code cursor & INDEX_MASK} instead of {@code cursor % INDEX_BUFFER_SIZE} + * for faster wraparound calculation.

+ */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-computed random indices for registry lookup benchmarks. + * + *

Populated during iteration setup with random values in range + * [0, registryVersions.length). Accessed via {@link #nextRegistryIndex()}.

+ */ + private final int[] registryIndexBuffer = new int[INDEX_BUFFER_SIZE]; + + /** + * Per-thread input data for migration benchmarks. + * + *

Regenerated at each iteration to ensure consistent memory allocation + * patterns and prevent cross-iteration caching effects.

+ */ + public Dynamic threadInput; + + /** + * Current position in the {@link #registryIndexBuffer}. + * + *

Incremented on each call to {@link #nextRegistryIndex()} and + * wrapped using {@link #INDEX_MASK}.

+ */ + private int registryCursor; + + /** + * Per-thread random number generator. + * + *

{@link SplittableRandom} is used instead of {@link java.util.Random} + * because it is faster and does not use atomic operations, eliminating + * contention when multiple threads generate random numbers.

+ */ + private SplittableRandom random; + + /** + * Initializes the per-thread random number generator. + * + *

Called once per trial. Uses a fixed seed (42) for reproducibility + * across benchmark runs, though each thread will produce different + * sequences due to {@link SplittableRandom}'s splittable nature.

+ */ + @Setup(Level.Trial) + public void setupTrial() { + // Per-thread RNG avoids contention and is faster than java.util.Random. + this.random = new SplittableRandom(42L); + } + + /** + * Regenerates input data and random indices for each iteration. + * + *

Fresh data generation per iteration ensures:

+ *
    + *
  • Consistent GC pressure across iterations
  • + *
  • No JIT over-optimization on specific data patterns
  • + *
  • Independent memory allocation per thread
  • + *
+ * + *

The random index buffer is refilled with new random values to + * vary the registry access pattern across iterations.

+ * + * @param s the shared benchmark state providing payload size and version array + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, s.payloadSize); + + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.registryIndexBuffer[i] = this.random.nextInt(s.registryVersions.length); + } + this.registryCursor = 0; + } + + /** + * Returns the next pre-computed random index for registry lookups. + * + *

Retrieves the next value from {@link #registryIndexBuffer} and + * advances the cursor with efficient bitwise wraparound. This method + * is called during measurement and is optimized to minimize overhead.

+ * + * @return a random index in range [0, registryVersions.length) + */ + public int nextRegistryIndex() { + return this.registryIndexBuffer[this.registryCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java new file mode 100644 index 0000000..9b374ee --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Concurrency-focused JMH benchmarks for the Aether DataFixers framework. + * + *

This package contains benchmarks that measure performance characteristics under + * concurrent load. These benchmarks validate thread-safety of the DataFixer system, + * identify contention points, and quantify scalability across different thread counts.

+ * + *

Benchmark Classes

+ * + * + * + * + * + * + * + * + * + * + * + *
+ *   <tr><th>Class</th><th>Focus Area</th><th>Key Metrics</th></tr>
+ *   <tr><td>{@link de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark}</td><td>Multi-threaded migration and registry access</td><td>Throughput scaling, contention overhead, thread-safety validation</td></tr>
+ * + *

Why Concurrent Benchmarks?

+ *

Single-threaded benchmarks measure raw operation performance, but real-world + * applications often use the DataFixer system from multiple threads simultaneously. + * Concurrent benchmarks reveal:

+ *
    + *
  • Lock contention: Synchronization overhead in shared components
  • + *
  • Cache coherency effects: Performance impact of shared data access
  • + *
  • Scalability limits: Point at which adding threads stops improving throughput
  • + *
  • Thread-safety validation: Correctness under concurrent access
  • + *
+ * + *

Running Concurrent Benchmarks

+ *
{@code
+ * # Run all concurrent benchmarks with maximum threads
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count
+ * java -jar benchmarks.jar ".*concurrent.*" -t 8
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*concurrent.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate detailed JSON report
+ * java -jar benchmarks.jar ".*concurrent.*" -rf json -rff concurrent_results.json
+ *
+ * # Profile lock contention with JFR
+ * java -jar benchmarks.jar ".*concurrent.*" -prof jfr
+ * }
+ * + *

Benchmark Design Principles

+ *
    + *
  • State Isolation: Per-thread state ({@code Scope.Thread}) for input data + * prevents false sharing and measurement interference
  • + *
  • Shared Resources: Benchmark-scoped state ({@code Scope.Benchmark}) for + * DataFixer instances simulates realistic concurrent access patterns
  • + *
  • Contention-Free Setup: Random number generation and data preparation + * occur during setup phases to avoid affecting measurements
  • + *
  • Fixed Thread Counts: Benchmarks with 4 and 8 threads provide + * reproducible scaling data points for comparison
  • + *
+ * + *

Interpreting Concurrent Results

+ *

Concurrent benchmark results require careful interpretation:

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ *   <tr><th>Pattern</th><th>Meaning</th><th>Action</th></tr>
+ *   <tr><td>Linear throughput scaling</td><td>No contention; excellent parallelism</td><td>None needed</td></tr>
+ *   <tr><td>Sub-linear scaling</td><td>Some contention; typical for shared resources</td><td>Acceptable; monitor for degradation</td></tr>
+ *   <tr><td>Throughput plateau</td><td>Saturation point reached</td><td>Identify bottleneck (CPU, memory, locks)</td></tr>
+ *   <tr><td>Throughput degradation</td><td>Severe contention; adding threads hurts</td><td>Investigate locking; consider lock-free alternatives</td></tr>
+ *   <tr><td>High variance (± error)</td><td>GC pauses, lock contention, or scheduling</td><td>Profile with async-profiler or JFR</td></tr>
+ * + *

Comparison with Core Benchmarks

+ *

The {@link de.splatgames.aether.datafixers.benchmarks.core core} package + * measures single-threaded baseline performance. Use concurrent benchmarks to:

+ *
    + *
  • Calculate concurrency overhead: {@code (single-threaded throughput × N threads) / actual throughput}
  • + *
  • Identify scaling efficiency: {@code actual throughput / (single-threaded throughput × N threads)}
  • + *
  • Detect regression: Compare concurrent results across code changes
  • + *
+ * + * @see de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.core + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.concurrent; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java new file mode 100644 index 0000000..2b3e535 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for chained DataFix application performance. + * + *

Measures how fix chain length affects migration performance. This benchmark + * is essential for understanding the scalability characteristics of the DataFixer + * system when applying multiple sequential fixes.

+ * + *

Benchmark Methods

+ *
    + *
  • {@link #renameChain} - Chain of homogeneous field rename operations
  • + *
  • {@link #mixedChain} - Chain of heterogeneous operations (renames, additions, transformations)
  • + *
  • {@link #partialChain} - Partial chain execution stopping at halfway version
  • + *
+ * + *

Parameters

+ * + * + * + * + *
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>fixCount</td><td>1, 5, 10, 25, 50</td><td>Number of fixes in the chain</td></tr>
+ *   <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Input data complexity</td></tr>
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * + *

Interpreting Results

+ *
    + *
  • Linear scaling: Ideal behavior where time scales proportionally with fix count.
  • + *
  • Sub-linear scaling: Better than expected, indicates optimization opportunities being exploited.
  • + *
  • Super-linear scaling: Indicates potential performance issues with long chains.
  • + *
  • Error (±): 99.9% confidence interval. Larger values with more fixes may indicate GC pressure.
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar MultiFixChainBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar MultiFixChainBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific fix count and payload size
+ * java -jar benchmarks.jar MultiFixChainBenchmark -p fixCount=10 -p payloadSize=SMALL
+ *
+ * # Generate CSV output for analysis
+ * java -jar benchmarks.jar MultiFixChainBenchmark -rf csv -rff chain_results.csv
+ * }
+ * + * @author Erik Pförtner + * @see SingleFixBenchmark + * @see BenchmarkBootstrap#createChainFixer(int) + * @see BenchmarkBootstrap#createMixedFixer(int) + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class MultiFixChainBenchmark { + + /** + * The number of fixes in the chain, injected by JMH. + * + *

This parameter controls the length of the fix chain being benchmarked. + * Higher values test the system's ability to handle long migration paths + * efficiently.

+ * + *
    + *
  • 1: Baseline single-fix performance (compare with {@link SingleFixBenchmark})
  • + *
  • 5: Short chain typical of minor version updates
  • + *
  • 10: Medium chain representing moderate version gaps
  • + *
  • 25: Long chain simulating significant version jumps
  • + *
  • 50: Stress test for extended migration paths
  • + *
+ */ + @Param({"1", "5", "10", "25", "50"}) + private int fixCount; + + /** + * The payload size parameter, injected by JMH. + * + *

Controls the complexity of generated test data. Only SMALL and MEDIUM + * sizes are used to keep benchmark runtime reasonable while still capturing + * scaling behavior.

+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * DataFixer configured with a chain of homogeneous field rename fixes. + * + *

Each fix in the chain performs a simple field rename operation (v{@code n} → v{@code n+1}). + * This represents the best-case scenario for chain execution.

+ */ + private DataFixer chainFixer; + + /** + * DataFixer configured with a chain of heterogeneous fix operations. + * + *

The chain includes a mix of rename, add, and transform operations to + * simulate realistic migration scenarios. Falls back to {@link #chainFixer} + * if mixed fixer creation fails.

+ */ + private DataFixer mixedFixer; + + /** + * Pre-generated input data matching {@link #payloadSize}. + * + *

Regenerated at each iteration to ensure consistent GC behavior + * and avoid caching effects.

+ */ + private Dynamic input; + + /** + * Source version for migrations (always v1). + */ + private DataVersion fromVersion; + + /** + * Target version for full chain migrations (v{@link #fixCount} + 1). + */ + private DataVersion toVersion; + + /** + * Target version for partial chain migrations (approximately half of {@link #toVersion}). + * + *

Used by {@link #partialChain} to measure performance when only part + * of the available fixes are applied.

+ */ + private DataVersion halfwayToVersion; + + /** + * Initializes the benchmark state once per trial. + * + *

Creates the chain and mixed fixers based on the current {@link #fixCount} + * parameter. Also calculates the version bounds for full and partial chain + * execution.

+ * + *

If mixed fixer creation fails (e.g., due to unsupported operations), + * the chain fixer is used as a fallback to ensure the benchmark can still run.

+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.chainFixer = BenchmarkBootstrap.createChainFixer(this.fixCount); + + try { + this.mixedFixer = BenchmarkBootstrap.createMixedFixer(this.fixCount); + } catch (final RuntimeException ex) { + this.mixedFixer = this.chainFixer; + } + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(this.fixCount + 1); + + final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1); + this.halfwayToVersion = new DataVersion(halfwayVersion); + } + + /** + * Regenerates input data at each iteration. + * + *

Fresh data generation per iteration ensures that:

+ *
    + *
  • GC behavior is consistent across iterations
  • + *
  • JIT optimizations don't over-specialize on specific data patterns
  • + *
  • Memory allocation patterns are representative of real usage
  • + *
+ */ + @Setup(Level.Iteration) + public void setupIteration() { + this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize); + } + + /** + * Benchmarks a chain of homogeneous field rename operations. + * + *

Measures the performance of applying {@link #fixCount} sequential rename + * fixes to migrate data from v1 to v{@code fixCount+1}. This represents an + * optimistic scenario where all fixes perform the same lightweight operation.

+ * + *

Use this benchmark to establish baseline chain performance and detect + * any non-linear scaling behavior in the fix application pipeline.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void renameChain(final Blackhole blackhole) { + final Dynamic result = this.chainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.toVersion); + blackhole.consume(result); + } + + /** + * Benchmarks a chain of heterogeneous fix operations. + * + *

Measures the performance of applying {@link #fixCount} sequential fixes + * that include a mix of operations:

+ *
    + *
  • Field renames
  • + *
  • Field additions with default values
  • + *
  • Field transformations (type conversions, value mappings)
  • + *
+ * + *

This benchmark provides a more realistic performance profile compared + * to {@link #renameChain}, as real-world migrations typically involve + * diverse operations.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void mixedChain(final Blackhole blackhole) { + final Dynamic result = this.mixedFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.toVersion); + blackhole.consume(result); + } + + /** + * Benchmarks partial chain execution stopping at halfway version. + * + *

Measures the performance of applying only half of the available fixes + * in the chain. This simulates scenarios where:

+ *
    + *
  • Data is migrated incrementally rather than to the latest version
  • + *
  • Target version is not the most recent available
  • + *
  • Partial upgrades are performed for compatibility reasons
  • + *
+ * + *

Comparing this benchmark with {@link #renameChain} reveals whether + * fix selection and version range calculations add significant overhead.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void partialChain(final Blackhole blackhole) { + final Dynamic result = this.chainFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.input, + this.fromVersion, + this.halfwayToVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java new file mode 100644 index 0000000..0b72395 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.schema.Schema; +import de.splatgames.aether.datafixers.api.schema.SchemaRegistry; +import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry; +import de.splatgames.aether.datafixers.testkit.factory.MockSchemas; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for schema registry lookup performance. + * + *

Measures the overhead of various schema lookup operations as registry size grows. + * Schema lookups are performed frequently during data migration, so their performance directly impacts overall + * migration throughput.

+ * + *

Benchmark Methods

+ *
    + *
+ *   <li>{@link #exactLookup} - Direct lookup by exact version match</li>
+ *   <li>{@link #floorLookup} - Floor lookup finding closest version ≤ target</li>
+ *   <li>{@link #latestLookup} - Retrieval of the most recent schema</li>
+ *   <li>{@link #sequentialLookup} - Sequential traversal of all registered versions</li>
+ * + *

Parameters

+ * + * + * + *
+ *   <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *   <tr><td>schemaCount</td><td>10, 50, 100, 500</td><td>Number of schemas in the registry</td></tr>
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
+ *   <tr><th>Setting</th><th>Value</th></tr>
+ *   <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *   <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *   <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ *   <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *   <tr><td>Time Unit</td><td>Nanoseconds</td></tr>
+ * + *

Interpreting Results

+ *
    + *
  • O(1) lookups: {@link #exactLookup} and {@link #latestLookup} should show constant time regardless of registry size.
  • + *
  • O(log n) lookups: {@link #floorLookup} may show logarithmic scaling if implemented via binary search.
  • + *
  • O(n) lookups: {@link #sequentialLookup} should scale linearly with schema count.
  • + *
  • Cache effects: Larger registries may show increased lookup time due to CPU cache pressure.
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SchemaLookupBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific schema count only
+ * java -jar benchmarks.jar SchemaLookupBenchmark -p schemaCount=100
+ *
+ * # Run specific lookup benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark.exactLookup
+ * }
+ * + * @author Erik Pförtner + * @see SchemaRegistry + * @see SimpleSchemaRegistry + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class SchemaLookupBenchmark { + + /** + * Benchmarks exact version lookup performance. + * + *

Measures the time to retrieve a schema by its exact registered version. + * This is the most common lookup pattern during migration when the source version is known precisely.

+ * + *

The benchmark uses pre-generated random indices to avoid RNG overhead + * in the measurement loop. Each invocation looks up a different random version to prevent branch prediction + * optimization.

+ * + * @param s the shared benchmark state containing the registry and versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void exactLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextExactIndex(); + final Schema schema = s.registry.get(s.versions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks floor lookup performance. + * + *

Measures the time to retrieve a schema using floor semantics, where + * the registry returns the schema with the highest version ≤ the requested version. This pattern is used when + * data may be at intermediate versions not explicitly registered.

+ * + *

The lookup versions include both exact matches (10, 20, 30, ...) and + * in-between values (5, 15, 25, ...) to exercise both fast-path exact matches and slower floor searches.

+ * + * @param s the shared benchmark state containing the registry and lookup versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void floorLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextFloorIndex(); + final Schema schema = s.registry.get(s.lookupVersions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks latest schema retrieval performance. + * + *

Measures the time to retrieve the most recent schema from the registry. + * This operation should be O(1) as the latest schema is typically cached or stored in a dedicated field.

+ * + *

This benchmark serves as a baseline for the fastest possible lookup + * operation and helps identify any unexpected overhead in the registry implementation.

+ * + * @param s the shared benchmark state containing the registry + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void latestLookup(final BenchmarkState s, + final Blackhole blackhole) { + final Schema schema = s.registry.latest(); + blackhole.consume(schema); + } + + /** + * Benchmarks sequential lookup of all registered schemas. + * + *

Measures the aggregate time to look up every schema in the registry + * in version order. This pattern occurs during schema validation, debugging, or when building migration path + * analyses.

+ * + *

Note: This benchmark performs multiple lookups per invocation + * ({@code schemaCount} lookups). The reported time is for the entire sequence, not per-lookup. Divide by + * {@code schemaCount} to get per-lookup overhead.

+ * + * @param s the shared benchmark state containing the registry and versions + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void sequentialLookup(final BenchmarkState s, + final Blackhole blackhole) { + for (final DataVersion version : s.versions) { + final Schema schema = s.registry.get(version); + blackhole.consume(schema); + } + } + + /** + * Shared JMH state containing the schema registry and version arrays. + * + *

This state is shared across all threads within a benchmark trial + * ({@link Scope#Benchmark}). The registry is populated with mock schemas at versions 10, 20, 30, ... up to + * {@code schemaCount * 10}.

+ * + *

The registry is frozen after setup to match production usage patterns + * where registries are immutable during normal operation.

+ */ + @State(Scope.Benchmark) + public static class BenchmarkState { + + /** + * The number of schemas to register, injected by JMH. + * + *

Controls the size of the schema registry to measure lookup + * performance scaling:

+ *
    + *
  • 10: Small registry, fits entirely in L1 cache
  • + *
  • 50: Medium registry, typical for most applications
  • + *
  • 100: Large registry, may exceed L1 cache
  • + *
  • 500: Stress test for registry scalability
  • + *
+ */ + @Param({"10", "50", "100", "500"}) + public int schemaCount; + + /** + * The frozen schema registry containing all registered schemas. + */ + public SchemaRegistry registry; + + /** + * Array of exact registered versions (10, 20, 30, ...). + * + *

Used by {@link #exactLookup} to ensure lookups always hit + * registered versions.

+ */ + public DataVersion[] versions; + + /** + * Array of lookup versions including in-between values (5, 10, 15, 20, ...). + * + *

Used by {@link #floorLookup} to exercise both exact matches + * and floor search behavior.

+ */ + public DataVersion[] lookupVersions; + + /** + * Initializes the schema registry and version arrays once per trial. + * + *

Creates a {@link SimpleSchemaRegistry} populated with minimal mock + * schemas at regular version intervals. The registry is frozen after population to enable any internal + * optimizations.

+ */ + @Setup(Level.Trial) + public void setup() { + final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry(); + this.versions = new DataVersion[this.schemaCount]; + + for (int i = 0; i < this.schemaCount; i++) { + final int version = (i + 1) * 10; + final DataVersion dataVersion = new DataVersion(version); + this.versions[i] = dataVersion; + simpleRegistry.register(MockSchemas.minimal(version)); + } + + simpleRegistry.freeze(); + this.registry = simpleRegistry; + + this.lookupVersions = new DataVersion[this.schemaCount * 2]; + for (int i = 0; i < this.lookupVersions.length; i++) { + this.lookupVersions[i] = new DataVersion((i + 1) * 5); + } + } + } + + /** + * Per-thread JMH state providing pre-generated random lookup indices. + * + *

Random number generation is expensive and would dominate the benchmark + * if performed in the hot path. This state pre-generates buffers of random indices during setup, allowing the + * benchmark methods to retrieve indices via simple array access and bit masking.

+ * + *

Each thread has its own state instance ({@link Scope#Thread}) to avoid
 + * contention on shared RNG state. The fixed seed ensures reproducible results across benchmark runs;
 + * note that because every thread uses the same seed, all threads draw the identical index sequence.

+ * + * @see BenchmarkState + */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-generated index buffer. + * + *

Power-of-two size enables cheap index wrapping via bit masking
 + * instead of a modulo operation.

+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bit mask for wrapping cursor to buffer bounds ({@code INDEX_BUFFER_SIZE - 1}). + */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-generated indices into {@link BenchmarkState#versions}. + */ + private final int[] exactIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Pre-generated indices into {@link BenchmarkState#lookupVersions}. + */ + private final int[] floorIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Current position in {@link #exactIndices}. + */ + private int exactCursor; + + /** + * Current position in {@link #floorIndices}. + */ + private int floorCursor; + + /** + * Thread-local random number generator for index generation. + */ + private SplittableRandom random; + + /** + * Initializes the random number generator once per trial. + * + *

Uses a fixed seed (42) for reproducibility. Each thread gets its
 + * own {@link SplittableRandom} instance to avoid synchronization overhead; since the seed is
 + * identical across threads, each thread generates the same pseudo-random index sequence.

+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.random = new SplittableRandom(42L); + } + + /** + * Refills the index buffers at each iteration. + * + *

Generates fresh random indices based on the current + * {@link BenchmarkState#schemaCount} parameter. Resets cursors to the beginning of each buffer.

+ * + * @param s the shared benchmark state providing array bounds + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.exactIndices[i] = this.random.nextInt(s.versions.length); + this.floorIndices[i] = this.random.nextInt(s.lookupVersions.length); + } + this.exactCursor = 0; + this.floorCursor = 0; + } + + /** + * Returns the next random index for exact version lookup. + * + *

Uses bit masking to wrap around the buffer efficiently.

+ * + * @return a random index into {@link BenchmarkState#versions} + */ + public int nextExactIndex() { + return this.exactIndices[this.exactCursor++ & INDEX_MASK]; + } + + /** + * Returns the next random index for floor version lookup. + * + *

Uses bit masking to wrap around the buffer efficiently.

+ * + * @return a random index into {@link BenchmarkState#lookupVersions} + */ + public int nextFloorIndex() { + return this.floorIndices[this.floorCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java new file mode 100644 index 0000000..c74d288 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for single DataFix application performance. + * + *

Measures the overhead of applying a single fix to data of varying sizes. + * Includes a baseline identity fix measurement to isolate framework overhead from actual transformation costs.

+ * + *

Benchmark Methods

+ *
    + *
  • {@link #identityFix} - Baseline measurement with no-op transformation
  • + *
  • {@link #singleRenameFix} - Single field rename operation
  • + *
  • {@link #playerDataFix} - Complex object transformation with codec roundtrip
  • + *
  • {@link #playerDataFixEndToEnd} - Full pipeline including setup overhead
  • + *
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2 (for statistical significance)
JVM Heap2 GB min/max
Time UnitMicroseconds
+ * + *

Interpreting Results

+ *
    + *
  • Throughput (ops/us): Higher is better. Operations per microsecond.
  • + *
  • Average Time (us/op): Lower is better. Microseconds per operation.
  • + *
  • Error (±): 99.9% confidence interval. Smaller means more stable results.
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SingleFixBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SingleFixBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific payload size only
+ * java -jar benchmarks.jar SingleFixBenchmark -p payloadSize=SMALL
+ * }
+ * + * @author Erik Pförtner + * @see BenchmarkBootstrap + * @see BenchmarkDataGenerator + * @see PayloadSize + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class SingleFixBenchmark { + + /** + * Benchmarks a single field rename operation. + * + *

Measures the performance of renaming one field in the input data. + * This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize} + * to measure scaling behavior.

+ * + * @param s the shared benchmark state containing fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void singleRenameFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the identity (no-op) fix as a baseline measurement. + * + *

Measures pure framework overhead without any actual data transformation. + * Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other + * benchmark results.

+ * + * @param s the shared benchmark state containing identity fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void identityFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.identityFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks a complex player data transformation with codec roundtrip. + * + *

Measures the performance of a realistic migration scenario where data + * is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex + * object transformations.

+ * + *

This benchmark is expected to be significantly slower than {@link #singleRenameFix} + * because codec roundtrips involve reflection, object instantiation, and full serialization/deserialization + * cycles.

+ * + * @param s the shared player benchmark state + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFix(final PlayerState s, + final Blackhole blackhole) { + blackhole.consume(s.playerFixer.update( + BenchmarkBootstrap.PLAYER_TYPE, + s.playerInput, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the complete end-to-end pipeline including setup overhead. + * + *

Measures the total cost of a migration including:

+ *
    + *
  • Test data generation
  • + *
  • DataFixer bootstrap and initialization
  • + *
  • Actual migration execution
  • + *
+ * + *

This benchmark is useful for understanding cold-start performance + * and the cost of creating new DataFixer instances. In production code, + * DataFixers should be reused rather than recreated per-operation.

+ * + *

Note: Results will be significantly slower than {@link #playerDataFix} + * due to setup overhead included in each iteration.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFixEndToEnd(final Blackhole blackhole) { + final Dynamic playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE); + final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer(); + blackhole.consume(playerFixer.update( + BenchmarkBootstrap.PLAYER_TYPE, + playerInput, + new DataVersion(1), + new DataVersion(2))); + } + + /** + * Shared JMH state for benchmarks parameterized by payload size. + * + *

This state is shared across all threads within a benchmark trial + * ({@link Scope#Benchmark}). The {@link #payloadSize} parameter controls the complexity of test data:

+ * + *
    + *
  • SMALL: 5 fields, 2 nesting levels, 10 array elements
  • + *
  • MEDIUM: 20 fields, 4 nesting levels, 100 array elements
  • + *
  • LARGE: 50 fields, 6 nesting levels, 1000 array elements
  • + *
+ * + * @see PayloadSize + */ + @State(Scope.Benchmark) + public static class SizedState { + + /** + * The payload size parameter, injected by JMH. Controls the complexity of generated test data. + */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + public PayloadSize payloadSize; + + /** + * DataFixer configured with a single field rename fix (v1 → v2). + */ + public DataFixer fixer; + + /** + * DataFixer configured with an identity (no-op) fix for baseline measurement. + */ + public DataFixer identityFixer; + + /** + * Pre-generated input data matching {@link #payloadSize}. + */ + public Dynamic input; + + /** + * Source version for migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + public DataVersion toVersion; + + /** + * Initializes the benchmark state once per trial. + * + *

Creates fixers and generates test data based on the current + * {@link #payloadSize} parameter value.

+ */ + @Setup(Level.Trial) + public void setup() { + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.identityFixer = BenchmarkBootstrap.createIdentityFixer(); + this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } + + /** + * Shared JMH state for player-specific benchmarks. + * + *

This state is separate from {@link SizedState} because the player benchmark + * uses a fixed, realistic data structure rather than parameterized payload sizes. The player data simulates a + * typical game entity with nested objects, arrays, and various field types.

+ * + *

The player fix performs a complete codec roundtrip transformation, + * making it representative of real-world migration scenarios where data is decoded, transformed, and + * re-encoded.

+ * + * @see BenchmarkBootstrap#createPlayerFixer() + * @see BenchmarkDataGenerator#generatePlayerData + */ + @State(Scope.Benchmark) + public static class PlayerState { + + /** + * DataFixer configured with a player-specific transformation fix. Performs codec decode → transform → encode + * cycle. + */ + public DataFixer playerFixer; + + /** + * Pre-generated player data structure with realistic game entity fields. + */ + public Dynamic playerInput; + + /** + * Source version for migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + public DataVersion toVersion; + + /** + * Initializes the player benchmark state once per trial. + * + *

Creates the player fixer and generates realistic player test data.

+ */ + @Setup(Level.Trial) + public void setup() { + this.playerFixer = BenchmarkBootstrap.createPlayerFixer(); + this.playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java new file mode 100644 index 0000000..32b058f --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Core JMH benchmarks for the Aether DataFixers framework. + * + *

This package contains benchmarks that measure the fundamental performance characteristics + * of the data fixer system, including fix application, chain execution, and schema registry + * operations. These benchmarks form the foundation for performance regression testing and + * optimization efforts.

+ * + *

Benchmark Classes

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ClassFocus AreaKey Metrics
{@link de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark}Single fix applicationPer-fix overhead, payload size scaling
{@link de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark}Chained fix executionChain length scaling, partial migration cost
{@link de.splatgames.aether.datafixers.benchmarks.core.SchemaLookupBenchmark}Schema registry operationsLookup latency, registry size scaling
+ * + *

Running Benchmarks

+ *
{@code
+ * # Run all core benchmarks
+ * java -jar benchmarks.jar ".*core.*"
+ *
+ * # Run with specific JVM options
+ * java -jar benchmarks.jar ".*core.*" -jvmArgs "-XX:+UseG1GC"
+ *
+ * # Generate JSON report
+ * java -jar benchmarks.jar ".*core.*" -rf json -rff core_results.json
+ * }
+ * + *

Benchmark Design Principles

+ *
    + *
  • Isolation: Each benchmark measures a single operation to isolate performance characteristics.
  • + *
  • Parameterization: Benchmarks are parameterized to capture scaling behavior across different input sizes.
  • + *
  • Reproducibility: Fixed seeds and deterministic data generation ensure reproducible results.
  • + *
  • JMH Best Practices: All benchmarks follow JMH guidelines including proper use of {@code Blackhole}, + * state scoping, and setup level annotations.
  • + *
+ * + *

Interpreting Results

+ *

All benchmarks in this package report both throughput (ops/time) and average time (time/op). + * When comparing results:

+ *
    + *
  • Compare measurements from the same JVM version and hardware
  • + *
  • Consider the 99.9% confidence interval (error bounds)
  • + *
  • Run multiple forks to account for JIT compilation variance
  • + *
  • Use baseline benchmarks (e.g., identity fix) to isolate framework overhead
  • + *
+ * + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.core; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java new file mode 100644 index 0000000..ac0bce9 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps; +import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for cross-format conversion performance between DynamicOps implementations. + * + *

This benchmark measures the overhead of converting data between different + * serialization formats using the {@code DynamicOps.convertTo()} mechanism. Cross-format + * conversion is essential when integrating systems that use different data formats + * or when migrating data through format-agnostic DataFixers.

+ * + *

Conversion Pairs Benchmarked

+ * + *

JSON Library Conversions

+ *
    + *
  • {@link #gsonToJackson} - Gson JsonElement → Jackson JsonNode
  • + *
  • {@link #jacksonToGson} - Jackson JsonNode → Gson JsonElement
  • + *
+ * + *

JSON to YAML Conversions

+ *
    + *
  • {@link #gsonToSnakeYaml} - Gson JsonElement → SnakeYAML Object
  • + *
  • {@link #snakeYamlToGson} - SnakeYAML Object → Gson JsonElement
  • + *
+ * + *

Jackson Ecosystem Conversions

+ *
    + *
  • {@link #jacksonJsonToYaml} - Jackson JSON → Jackson YAML
  • + *
  • {@link #jacksonYamlToJson} - Jackson YAML → Jackson JSON
  • + *
+ * + *

YAML Library Conversions

+ *
    + *
  • {@link #snakeYamlToJacksonYaml} - SnakeYAML → Jackson YAML
  • + *
  • {@link #jacksonYamlToSnakeYaml} - Jackson YAML → SnakeYAML
  • + *
+ * + *

Conversion Matrix

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
From \ ToGsonJackson JSONSnakeYAMLJackson YAML
Gson--
Jackson JSON--
SnakeYAML--
Jackson YAML--
+ * + *

Parameters

+ * + * + * + *
ParameterValuesDescription
payloadSizeSMALL, MEDIUMTest data complexity
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2
JVM Heap2 GB min/max
Time UnitMicroseconds
+ * + *

Interpreting Results

+ *
    + *
  • Same-ecosystem conversions (e.g., Jackson JSON ↔ Jackson YAML) are + * typically faster due to shared internal representations
  • + *
  • Cross-ecosystem conversions (e.g., Gson ↔ SnakeYAML) require full + * tree traversal and node creation
  • + *
  • Asymmetric performance: A→B may differ from B→A due to different + * source iteration and target construction costs
  • + *
+ * + *

Usage

+ *
{@code
+ * # Run all cross-format benchmarks
+ * java -jar benchmarks.jar CrossFormatBenchmark
+ *
+ * # Run JSON library conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*(gson|jackson)To(Gson|Jackson).*"
+ *
+ * # Run YAML conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*Yaml.*"
+ *
+ * # Compare with specific payload size
+ * java -jar benchmarks.jar CrossFormatBenchmark -p payloadSize=MEDIUM
+ * }
+ * + * @author Erik Pförtner + * @see JsonBenchmark + * @see YamlBenchmark + * @see TomlXmlBenchmark + * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps#convertTo(DynamicOps, Object) + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class CrossFormatBenchmark { + + /** + * Payload size parameter controlling test data complexity. + * + *

Limited to SMALL and MEDIUM as cross-format conversion overhead + * can be significant with large data sets.

+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson JSON DynamicOps implementation. + */ + private JacksonJsonOps jacksonJsonOps; + + /** + * SnakeYAML DynamicOps implementation using native Java types. + */ + private SnakeYamlOps snakeYamlOps; + + /** + * Jackson YAML DynamicOps implementation. + */ + private JacksonYamlOps jacksonYamlOps; + + /** + * Pre-generated Gson root element for conversion benchmarks. + */ + private JsonElement gsonRoot; + + /** + * Pre-generated Jackson JSON root node for conversion benchmarks. + */ + private JsonNode jacksonJsonRoot; + + /** + * Pre-generated SnakeYAML root object for conversion benchmarks. + */ + private Object snakeYamlRoot; + + /** + * Pre-generated Jackson YAML root node for conversion benchmarks. + */ + private JsonNode jacksonYamlRoot; + + /** + * Initializes all DynamicOps instances and pre-generates test data in each format. + * + *

Data is pre-generated in each format to ensure conversion benchmarks measure + * only the conversion overhead, not data generation time.

+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonJsonOps = JacksonJsonOps.INSTANCE; + this.snakeYamlOps = SnakeYamlOps.INSTANCE; + this.jacksonYamlOps = JacksonYamlOps.INSTANCE; + + this.gsonRoot = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize).value(); + this.jacksonJsonRoot = BenchmarkDataGenerator.generate(this.jacksonJsonOps, this.payloadSize).value(); + this.snakeYamlRoot = BenchmarkDataGenerator.generate(this.snakeYamlOps, this.payloadSize).value(); + this.jacksonYamlRoot = BenchmarkDataGenerator.generate(this.jacksonYamlOps, this.payloadSize).value(); + } + + // ==================== Gson <-> Jackson JSON Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to Jackson JsonNode. + * + *

Measures the overhead of converting between two JSON libraries. + * Both represent JSON but use different internal tree structures.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToJackson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson JsonNode to Gson JsonElement. + * + *

Measures the reverse conversion from Jackson to Gson representation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + // ==================== Gson <-> SnakeYAML Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to SnakeYAML native types. + * + *

Measures cross-ecosystem conversion from JSON library to YAML library. + * SnakeYAML uses native Java Maps and Lists internally.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from SnakeYAML native types to Gson JsonElement. + * + *

Measures cross-ecosystem conversion from YAML native types to JSON tree.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + // ==================== Jackson JSON <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from Jackson JSON to Jackson YAML. + * + *

Measures conversion within the Jackson ecosystem. Both formats use + * JsonNode internally, potentially enabling optimizations.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonJsonToYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML to Jackson JSON. + * + *

Measures reverse conversion within the Jackson ecosystem.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToJson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } + + // ==================== SnakeYAML <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from SnakeYAML native types to Jackson YAML JsonNode. + * + *

Measures conversion between two YAML libraries with different internal + * representations (native Java types vs JsonNode).

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToJacksonYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML JsonNode to SnakeYAML native types. + * + *

Measures reverse conversion from JsonNode to native Java Maps/Lists.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java new file mode 100644 index 0000000..1a87c58 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.jetbrains.annotations.Nullable; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark comparing JSON DynamicOps implementations: Gson vs Jackson. + * + *

This benchmark measures the performance of JSON-based operations using two + * different underlying libraries: Google Gson ({@link GsonOps}) and Jackson Databind + * ({@link JacksonJsonOps}). The results help determine which implementation is more + * suitable for specific use cases.

+ * + *

Benchmark Categories

+ * + *

Data Generation

+ *

Measure Dynamic object construction performance:

+ *
    + *
  • {@link #gsonGenerate} - Create Dynamic using GsonOps
  • + *
  • {@link #jacksonGenerate} - Create Dynamic using JacksonJsonOps
  • + *
+ * + *

Field Access

+ *

Measure field read operations on existing data:

+ *
    + *
  • {@link #gsonFieldRead} - Read field from Gson-backed Dynamic
  • + *
  • {@link #jacksonFieldRead} - Read field from Jackson-backed Dynamic
  • + *
+ * + *

Field Modification

+ *

Measure field write/set operations:

+ *
    + *
  • {@link #gsonFieldSet} - Set field on Gson-backed Dynamic
  • + *
  • {@link #jacksonFieldSet} - Set field on Jackson-backed Dynamic
  • + *
+ * + *

Migration

+ *

Measure DataFixer migration performance:

+ *
    + *
  • {@link #gsonMigration} - Apply fix to Gson-backed data
  • + *
  • {@link #jacksonMigration} - Apply fix to Jackson-backed data
  • + *
  • {@link #crossFormatMigrationJacksonInput} - Cross-format migration scenario
  • + *
+ * + *

Implementations Compared

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ImplementationLibraryNode TypeCharacteristics
{@link GsonOps}Google Gson{@code JsonElement}Simple API, smaller footprint, widely used
{@link JacksonJsonOps}Jackson Databind{@code JsonNode}Feature-rich, streaming support, high performance
+ * + *

Parameters

+ * + * + * + *
ParameterValuesDescription
payloadSizeSMALL, MEDIUM, LARGETest data complexity
+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2
JVM Heap2 GB min/max
Time UnitMicroseconds
+ * + *

Usage

+ *
{@code
+ * # Run all JSON benchmarks
+ * java -jar benchmarks.jar JsonBenchmark
+ *
+ * # Compare only field access performance
+ * java -jar benchmarks.jar "JsonBenchmark.*FieldRead"
+ *
+ * # Run Gson-only benchmarks
+ * java -jar benchmarks.jar "JsonBenchmark.gson.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar JsonBenchmark -p payloadSize=LARGE
+ * }
+ * + * @author Erik Pförtner + * @see YamlBenchmark + * @see TomlXmlBenchmark + * @see CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @see de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class JsonBenchmark { + + /** + * Field name used for read/write benchmarks. + * + *

References the first string field generated by {@link BenchmarkDataGenerator}.

+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *

Injected by JMH to run benchmarks across different data sizes.

+ */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson Databind DynamicOps implementation. + */ + private JacksonJsonOps jacksonOps; + + /** + * Pre-generated test data using Gson. + */ + private Dynamic gsonData; + + /** + * Pre-generated test data using Jackson. + */ + private Dynamic jacksonData; + + /** + * DataFixer for Gson-based migrations. + */ + private DataFixer gsonFixer; + + /** + * Optional DataFixer for Jackson-based migrations. + * + *

May be {@code null} if no dedicated Jackson fixer is configured. + * In that case, cross-format migration behavior is measured instead.

+ */ + @Nullable + private DataFixer jacksonFixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixers. + * + *

Both Gson and Jackson data are pre-generated to isolate benchmark + * measurements from data creation overhead (except for generation benchmarks).

+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonOps = JacksonJsonOps.INSTANCE; + + this.gsonData = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize); + this.jacksonData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + + this.gsonFixer = BenchmarkBootstrap.createSingleFixFixer(); + + // If you have a dedicated Jackson fixer, wire it here. Otherwise keep it null and measure cross-format explicitly. + // Example (if you add it later): this.jacksonFixer = BenchmarkBootstrap.createSingleFixFixerJackson(); + this.jacksonFixer = null; + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using GsonOps. + * + *

Measures the time to create a complete test data structure using + * Gson as the underlying JSON representation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize); + blackhole.consume(data); + } + + /** + * Benchmarks Dynamic object generation using JacksonJsonOps. + * + *

Measures the time to create a complete test data structure using + * Jackson as the underlying JSON representation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + blackhole.consume(data); + } + + // ==================== Field Access Benchmarks ==================== + + /** + * Benchmarks field read access on Gson-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing + * Gson-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldRead(final Blackhole blackhole) { + final Dynamic field = this.gsonData.get(FIELD_NAME); + blackhole.consume(field); + } + + /** + * Benchmarks field read access on Jackson-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing + * Jackson-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldRead(final Blackhole blackhole) { + final Dynamic field = this.jacksonData.get(FIELD_NAME); + blackhole.consume(field); + } + + // ==================== Field Modification Benchmarks ==================== + + /** + * Benchmarks field set operation on Gson-backed Dynamic. + * + *

Measures the time to add a new field to a Gson-based Dynamic object. + * This operation typically creates a new Dynamic with the modified content.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldSet(final Blackhole blackhole) { + final Dynamic result = this.gsonData.set( + "newField", + this.gsonData.createString("newValue") + ); + blackhole.consume(result); + } + + /** + * Benchmarks field set operation on Jackson-backed Dynamic. + * + *

Measures the time to add a new field to a Jackson-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldSet(final Blackhole blackhole) { + final Dynamic result = this.jacksonData.set( + "newField", + this.jacksonData.createString("newValue") + ); + blackhole.consume(result); + } + + // ==================== Migration Benchmarks ==================== + + /** + * Benchmarks DataFixer migration on Gson-backed data. + * + *

Measures the time to apply a single fix migration to Gson-based + * Dynamic data. This represents the typical migration scenario where + * both fixer and data use the same DynamicOps implementation.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonMigration(final Blackhole blackhole) { + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.gsonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks DataFixer migration on Jackson-backed data. + * + *

If a dedicated Jackson fixer is available, measures native Jackson + * migration. Otherwise, falls back to cross-format migration using the + * Gson-based fixer with Jackson input data.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonMigration(final Blackhole blackhole) { + if (this.jacksonFixer == null) { + // No dedicated Jackson fixer available -> this would not be a fair "Jackson migration" benchmark. + // Measure the cross-format behavior explicitly instead. + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + return; + } + + final Dynamic result = this.jacksonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks cross-format migration with Jackson input and Gson-based fixer. + * + *

Measures the performance overhead when the fixer's DynamicOps differs + * from the input data's DynamicOps. This scenario is common when migrating + * data from various sources through a centralized fixer.

+ * + *

Comparing this benchmark with {@link #gsonMigration} reveals the + * overhead of format conversion during migration.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void crossFormatMigrationJacksonInput(final Blackhole blackhole) { + final Dynamic result = this.gsonFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.jacksonData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java new file mode 100644 index 0000000..2dc134c --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps; +import de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for TOML and XML DynamicOps implementations via Jackson. + * + *

This benchmark measures the performance of TOML and XML format operations + * using Jackson-based implementations ({@link JacksonTomlOps} and {@link JacksonXmlOps}). + * Both formats share Jackson's unified API, enabling direct performance comparison.

+ * + *

Benchmark Categories

+ * + *

Data Generation

+ *

Measure Dynamic object construction performance:

+ *
    + *
  • {@link #tomlGenerate} - Create Dynamic using JacksonTomlOps
  • + *
  • {@link #xmlGenerate} - Create Dynamic using JacksonXmlOps
  • + *
+ * + *

Field Access

+ *

Measure field read operations on existing data:

+ *
    + *
  • {@link #tomlFieldRead} - Read field from TOML-backed Dynamic
  • + *
  • {@link #xmlFieldRead} - Read field from XML-backed Dynamic
  • + *
+ * + *

Field Modification

+ *

Measure field write/set operations:

+ *
    + *
  • {@link #tomlFieldSet} - Set field on TOML-backed Dynamic
  • + *
  • {@link #xmlFieldSet} - Set field on XML-backed Dynamic
  • + *
+ * + *

Migration

+ *

Measure DataFixer migration performance:

+ *
    + *
  • {@link #tomlMigration} - Apply fix to TOML-backed data
  • + *
  • {@link #xmlMigration} - Apply fix to XML-backed data
  • + *
+ * + *

Implementations

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ImplementationLibraryNode TypeUse Case
{@link JacksonTomlOps}Jackson Dataformat TOML{@code JsonNode}Configuration files, Rust ecosystem integration
{@link JacksonXmlOps}Jackson Dataformat XML{@code JsonNode}Legacy systems, SOAP/REST APIs, document formats
+ * + *

Parameters

+ * + * + * + *
ParameterValuesDescription
payloadSizeSMALL, MEDIUMTest data complexity (LARGE excluded for performance)
+ * + *

Note: The LARGE payload size is excluded from this benchmark because + * TOML and XML serialization typically have higher overhead than JSON/YAML, + * making large payloads impractical for typical use cases.

+ * + *

Benchmark Configuration

+ * + * + * + * + * + * + * + *
SettingValue
Warmup5 iterations, 1 second each
Measurement10 iterations, 1 second each
Forks2
JVM Heap2 GB min/max
Time UnitMicroseconds
+ * + *

Usage

+ *
{@code
+ * # Run all TOML/XML benchmarks
+ * java -jar benchmarks.jar TomlXmlBenchmark
+ *
+ * # Run TOML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.toml.*"
+ *
+ * # Run XML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.xml.*"
+ *
+ * # Compare generation performance
+ * java -jar benchmarks.jar "TomlXmlBenchmark.*Generate"
+ * }
+ * + * @author Erik Pförtner + * @see JsonBenchmark + * @see YamlBenchmark + * @see CrossFormatBenchmark + * @see de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps + * @see de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps + * @since 1.0.0 + */ +@BenchmarkMode({Mode.Throughput, Mode.AverageTime}) +@OutputTimeUnit(TimeUnit.MICROSECONDS) +@State(Scope.Benchmark) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"}) +public class TomlXmlBenchmark { + + /** + * Field name used for read/write benchmarks. + * + *

References the first string field generated by {@link BenchmarkDataGenerator}.

+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *

Limited to SMALL and MEDIUM to avoid excessive benchmark runtime + * with the higher overhead of TOML and XML processing.

+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Jackson TOML DynamicOps implementation. + */ + private JacksonTomlOps tomlOps; + + /** + * Jackson XML DynamicOps implementation. + */ + private JacksonXmlOps xmlOps; + + /** + * Pre-generated test data using TOML format. + */ + private Dynamic tomlData; + + /** + * Pre-generated test data using XML format. + */ + private Dynamic xmlData; + + /** + * DataFixer for migration benchmarks. + */ + private DataFixer fixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixer. + * + *

Both TOML and XML data are pre-generated to isolate benchmark + * measurements from data creation overhead.

+ */ + @Setup(Level.Trial) + public void setup() { + this.tomlOps = JacksonTomlOps.INSTANCE; + this.xmlOps = JacksonXmlOps.INSTANCE; + + this.tomlData = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize); + this.xmlData = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize); + + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using JacksonTomlOps. + * + *

Measures the time to create a complete test data structure using + * Jackson's TOML dataformat module.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize); + blackhole.consume(data); + } + + /** + * Benchmarks Dynamic object generation using JacksonXmlOps. + * + *

Measures the time to create a complete test data structure using + * Jackson's XML dataformat module.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlGenerate(final Blackhole blackhole) { + final Dynamic data = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize); + blackhole.consume(data); + } + + // ==================== Field Access Benchmarks ==================== + + /** + * Benchmarks field read access on TOML-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing + * TOML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.tomlData.get(FIELD_NAME); + blackhole.consume(field); + } + + /** + * Benchmarks field read access on XML-backed Dynamic. + * + *

Measures the time to retrieve a single field from a pre-existing + * XML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldRead(final Blackhole blackhole) { + final Dynamic field = this.xmlData.get(FIELD_NAME); + blackhole.consume(field); + } + + // ==================== Field Modification Benchmarks ==================== + + /** + * Benchmarks field set operation on TOML-backed Dynamic. + * + *

Measures the time to add a new field to a TOML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.tomlData.set( + "newField", + this.tomlData.createString("newValue") + ); + blackhole.consume(result); + } + + /** + * Benchmarks field set operation on XML-backed Dynamic. + * + *

Measures the time to add a new field to an XML-based Dynamic object.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldSet(final Blackhole blackhole) { + final Dynamic result = this.xmlData.set( + "newField", + this.xmlData.createString("newValue") + ); + blackhole.consume(result); + } + + // ==================== Migration Benchmarks ==================== + + /** + * Benchmarks DataFixer migration on TOML-backed data. + * + *

Measures the time to apply a single fix migration to TOML-based + * Dynamic data.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.tomlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } + + /** + * Benchmarks DataFixer migration on XML-backed data. + * + *

Measures the time to apply a single fix migration to XML-based + * Dynamic data.

+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlMigration(final Blackhole blackhole) { + final Dynamic result = this.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + this.xmlData, + this.fromVersion, + this.toVersion + ); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java new file mode 100644 index 0000000..c0f2862 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
 */

package de.splatgames.aether.datafixers.benchmarks.format;

import com.fasterxml.jackson.databind.JsonNode;
import de.splatgames.aether.datafixers.api.DataVersion;
import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
import de.splatgames.aether.datafixers.api.fix.DataFixer;
import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

import java.util.concurrent.TimeUnit;

/**
 * JMH benchmark comparing the two YAML-backed {@code DynamicOps} implementations:
 * SnakeYAML ({@link SnakeYamlOps}) and Jackson YAML ({@link JacksonYamlOps}).
 *
 * <p>Four operation categories are measured, once per implementation:</p>
 * <ul>
 *   <li><b>Generation</b> &ndash; building a complete test payload from scratch
 *       ({@link #snakeYamlGenerate}, {@link #jacksonYamlGenerate})</li>
 *   <li><b>Field read</b> &ndash; retrieving a single field from pre-built data
 *       ({@link #snakeYamlFieldRead}, {@link #jacksonYamlFieldRead})</li>
 *   <li><b>Field set</b> &ndash; adding a field, producing a new structure
 *       ({@link #snakeYamlFieldSet}, {@link #jacksonYamlFieldSet})</li>
 *   <li><b>Migration</b> &ndash; applying a single {@link DataFixer} fix
 *       ({@link #snakeYamlMigration}, {@link #jacksonYamlMigration})</li>
 * </ul>
 *
 * <p>SnakeYAML represents documents as native Java types (maps and lists), while
 * Jackson YAML reuses Jackson's {@link JsonNode} tree model and therefore shares
 * code paths with the JSON implementation. Payload complexity is driven by the
 * JMH parameter {@code payloadSize} (SMALL, MEDIUM, LARGE).</p>
 *
 * <p>Configuration: 5 warmup and 10 measurement iterations of 1&nbsp;second each,
 * 2 forks with a fixed 2&nbsp;GB heap, reporting throughput and average time in
 * microseconds.</p>
 *
 * <pre>{@code
 * # Run all YAML benchmarks
 * java -jar benchmarks.jar YamlBenchmark
 *
 * # Compare only generation performance
 * java -jar benchmarks.jar "YamlBenchmark.*Generate"
 *
 * # Run with a specific payload size
 * java -jar benchmarks.jar YamlBenchmark -p payloadSize=MEDIUM
 * }</pre>
 *
 * @author Erik Pförtner
 * @see JsonBenchmark
 * @see TomlXmlBenchmark
 * @see CrossFormatBenchmark
 * @see de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps
 * @see de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps
 * @since 1.0.0
 */
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class YamlBenchmark {

    /**
     * Field name used by the read/write benchmarks; references the first string
     * field emitted by {@link BenchmarkDataGenerator}.
     */
    private static final String FIELD_NAME = "stringField0";

    // NOTE(review): the generic type parameters of Dynamic (presumably <Object> for
    // SnakeYAML and <JsonNode> for Jackson YAML) appear to have been lost during
    // extraction of this file — confirm against the upstream sources.

    /** Payload size parameter; injected by JMH to run each benchmark per data size. */
    @Param({"SMALL", "MEDIUM", "LARGE"})
    private PayloadSize payloadSize;

    /** SnakeYAML DynamicOps implementation (native Java Maps/Lists). */
    private SnakeYamlOps snakeOps;

    /** Jackson YAML DynamicOps implementation (JsonNode tree model). */
    private JacksonYamlOps jacksonOps;

    /** Pre-generated test data backed by SnakeYAML. */
    private Dynamic snakeYamlData;

    /** Pre-generated test data backed by Jackson YAML. */
    private Dynamic jacksonYamlData;

    /** DataFixer used by the migration benchmarks. */
    private DataFixer fixer;

    /** Source version for migrations (v1). */
    private DataVersion fromVersion;

    /** Target version for migrations (v2). */
    private DataVersion toVersion;

    /**
     * Initializes the DynamicOps instances, pre-generates test data for both
     * implementations, and builds the single-fix DataFixer.
     *
     * <p>Data is generated once per trial so that the read/set/migration
     * benchmarks are isolated from data-creation overhead.</p>
     */
    @Setup(Level.Trial)
    public void setup() {
        this.snakeOps = SnakeYamlOps.INSTANCE;
        this.jacksonOps = JacksonYamlOps.INSTANCE;

        this.snakeYamlData = BenchmarkDataGenerator.generate(this.snakeOps, this.payloadSize);
        this.jacksonYamlData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize);

        this.fixer = BenchmarkBootstrap.createSingleFixFixer();
        this.fromVersion = new DataVersion(1);
        this.toVersion = new DataVersion(2);
    }

    // ==================== Data Generation Benchmarks ====================

    /**
     * Benchmarks Dynamic object generation using SnakeYamlOps (native Java
     * Maps/Lists representation).
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void snakeYamlGenerate(final Blackhole blackhole) {
        final Dynamic data = BenchmarkDataGenerator.generate(this.snakeOps, this.payloadSize);
        blackhole.consume(data);
    }

    /**
     * Benchmarks Dynamic object generation using JacksonYamlOps (JsonNode
     * representation).
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void jacksonYamlGenerate(final Blackhole blackhole) {
        final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize);
        blackhole.consume(data);
    }

    // ==================== Field Access Benchmarks ====================

    /**
     * Benchmarks a single-field read from the pre-generated SnakeYAML-backed
     * Dynamic (backed by a Java Map).
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void snakeYamlFieldRead(final Blackhole blackhole) {
        final Dynamic field = this.snakeYamlData.get(FIELD_NAME);
        blackhole.consume(field);
    }

    /**
     * Benchmarks a single-field read from the pre-generated Jackson YAML-backed
     * Dynamic.
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void jacksonYamlFieldRead(final Blackhole blackhole) {
        final Dynamic field = this.jacksonYamlData.get(FIELD_NAME);
        blackhole.consume(field);
    }

    // ==================== Field Modification Benchmarks ====================

    /**
     * Benchmarks adding a new field to the SnakeYAML-backed Dynamic.
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void snakeYamlFieldSet(final Blackhole blackhole) {
        final Dynamic result = this.snakeYamlData.set(
                "newField",
                this.snakeYamlData.createString("newValue")
        );
        blackhole.consume(result);
    }

    /**
     * Benchmarks adding a new field to the Jackson YAML-backed Dynamic.
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void jacksonYamlFieldSet(final Blackhole blackhole) {
        final Dynamic result = this.jacksonYamlData.set(
                "newField",
                this.jacksonYamlData.createString("newValue")
        );
        blackhole.consume(result);
    }

    // ==================== Migration Benchmarks ====================

    /**
     * Benchmarks a single-fix DataFixer migration (v1 &rarr; v2) on
     * SnakeYAML-backed data.
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void snakeYamlMigration(final Blackhole blackhole) {
        final Dynamic result = this.fixer.update(
                BenchmarkBootstrap.BENCHMARK_TYPE,
                this.snakeYamlData,
                this.fromVersion,
                this.toVersion
        );
        blackhole.consume(result);
    }

    /**
     * Benchmarks a single-fix DataFixer migration (v1 &rarr; v2) on
     * Jackson YAML-backed data.
     *
     * @param blackhole JMH blackhole to prevent dead code elimination
     */
    @Benchmark
    public void jacksonYamlMigration(final Blackhole blackhole) {
        final Dynamic result = this.fixer.update(
                BenchmarkBootstrap.BENCHMARK_TYPE,
                this.jacksonYamlData,
                this.fromVersion,
                this.toVersion
        );
        blackhole.consume(result);
    }
}
 */

/**
 * Format-focused JMH benchmarks comparing {@code DynamicOps} implementations in the
 * Aether DataFixers framework.
 *
 * <p>These benchmarks compare the performance of the different serialization-format
 * backends so users can choose an implementation based on empirical data rather
 * than guesswork.</p>
 *
 * <h2>Benchmark classes</h2>
 * <ul>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.format.JsonBenchmark} &ndash;
 *       GsonOps vs. JacksonJsonOps</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.format.YamlBenchmark} &ndash;
 *       SnakeYamlOps vs. JacksonYamlOps</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.format.TomlXmlBenchmark} &ndash;
 *       JacksonTomlOps vs. JacksonXmlOps</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark} &ndash;
 *       conversion overhead between all format pairs</li>
 * </ul>
 *
 * <h2>Measured operations</h2>
 * <p>Each format benchmark measures data generation, single-field read,
 * single-field set (producing a new immutable structure), and application of a
 * DataFix to format-specific data.</p>
 *
 * <h2>Running</h2>
 * <pre>{@code
 * # All format benchmarks
 * java -jar benchmarks.jar ".*format.*"
 *
 * # A single class
 * java -jar benchmarks.jar YamlBenchmark
 *
 * # Generation or migration benchmarks across all formats
 * java -jar benchmarks.jar ".*Benchmark.*Generate"
 * java -jar benchmarks.jar ".*Benchmark.*Migration"
 *
 * # With a specific payload size
 * java -jar benchmarks.jar ".*format.*" -p payloadSize=MEDIUM
 * }</pre>
 *
 * <h2>Choosing an implementation</h2>
 * <ul>
 *   <li><b>General JSON</b>: GsonOps or JacksonJsonOps &ndash; compare the numbers;
 *       both are mature.</li>
 *   <li><b>YAML configuration files</b>: SnakeYamlOps for native YAML features
 *       (anchors, aliases).</li>
 *   <li><b>Mixed Jackson ecosystem</b>: JacksonJsonOps/JacksonYamlOps share the
 *       {@code JsonNode} model, making cross-format conversion cheapest.</li>
 *   <li><b>TOML / XML</b>: JacksonTomlOps and JacksonXmlOps are the only options
 *       for those formats.</li>
 * </ul>
 *
 * <h2>Interpreting results</h2>
 * <ul>
 *   <li><b>Throughput</b>: higher ops/sec is better for high-volume scenarios.</li>
 *   <li><b>Average time</b>: lower latency is better for interactive use.</li>
 *   <li><b>Scaling</b>: compare SMALL vs. MEDIUM vs. LARGE payloads.</li>
 *   <li><b>Variance</b>: large &plusmn; values may indicate GC sensitivity or JIT
 *       instability.</li>
 * </ul>
 *
 * @see de.splatgames.aether.datafixers.benchmarks.format.JsonBenchmark
 * @see de.splatgames.aether.datafixers.benchmarks.format.YamlBenchmark
 * @see de.splatgames.aether.datafixers.benchmarks.format.TomlXmlBenchmark
 * @see de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark
 * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps
 * @since 1.0.0
 */
package de.splatgames.aether.datafixers.benchmarks.format;
 * IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * JMH benchmark suite for the Aether DataFixers framework.
 *
 * <p>This package and its sub-packages provide performance benchmarks for the major
 * components of the system, built on
 * <a href="https://github.com/openjdk/jmh">JMH</a> (Java Microbenchmark Harness).</p>
 *
 * <h2>Package structure</h2>
 * <ul>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.core core} &ndash; DataFixer
 *       migration performance (single fix, fix chains, schema lookup)</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.codec codec} &ndash; codec
 *       encode/decode performance (primitives, collections, round-trips)</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} &ndash;
 *       thread-safety and scalability under concurrent load</li>
 *   <li>{@code format} &ndash; DynamicOps format comparisons (JSON, YAML, TOML, XML,
 *       cross-format conversion)</li>
 *   <li>{@link de.splatgames.aether.datafixers.benchmarks.util util} &ndash; benchmark
 *       infrastructure (bootstrap, data generation, payload sizing)</li>
 * </ul>
 *
 * <h2>Running benchmarks</h2>
 * <pre>{@code
 * # Via Maven (development)
 * mvn exec:java -pl aether-datafixers-benchmarks
 * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
 *
 * # Via the fat JAR (production)
 * mvn clean package -pl aether-datafixers-benchmarks -DskipTests
 * java -jar target/aether-datafixers-benchmarks-*-benchmarks.jar
 * java -jar target/*-benchmarks.jar -l                      # list benchmarks
 * java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 5 -i 10 -f 2
 * java -jar target/*-benchmarks.jar -rf json -rff results.json
 * }</pre>
 *
 * <p>Programmatic entry points are provided by
 * {@link de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner}, e.g.
 * {@code runAllBenchmarks()}, {@code runQuickBenchmarks()},
 * {@code runCoreBenchmarks()}, and {@code runFormatBenchmarks()}.</p>
 *
 * <h2>Default configuration</h2>
 * <ul>
 *   <li>5 warmup iterations (JIT stabilization)</li>
 *   <li>10 measurement iterations (statistical significance)</li>
 *   <li>2 forks (JVM variance mitigation)</li>
 *   <li>2&nbsp;GB fixed heap (avoid GC interference)</li>
 *   <li>Time unit varies: ns for primitives, &micro;s for complex operations</li>
 * </ul>
 *
 * <h2>Common JMH options</h2>
 * <ul>
 *   <li>{@code -wi N} / {@code -i N} &ndash; warmup / measurement iterations</li>
 *   <li>{@code -f N} &ndash; forks; {@code -t N} &ndash; threads</li>
 *   <li>{@code -p key=value} &ndash; parameter value</li>
 *   <li>{@code -rf format} / {@code -rff file} &ndash; result format / output file</li>
 *   <li>{@code -prof profiler} &ndash; enable profiler (gc, async, jfr)</li>
 *   <li>{@code -l} &ndash; list benchmarks; {@code -h} &ndash; help</li>
 * </ul>
 *
 * <h2>Best practices</h2>
 * <ul>
 *   <li>Run on dedicated hardware with minimal background processes.</li>
 *   <li>Use at least two forks and sufficient warmup.</li>
 *   <li>Compare results only from the same machine and JVM version.</li>
 *   <li>Consider error margins (&plusmn;, 99.9% confidence interval) when comparing.</li>
 * </ul>
 *
 * @see de.splatgames.aether.datafixers.benchmarks.BenchmarkRunner
 * @see de.splatgames.aether.datafixers.benchmarks.core
 * @see de.splatgames.aether.datafixers.benchmarks.codec
 * @see de.splatgames.aether.datafixers.benchmarks.concurrent
 * @see de.splatgames.aether.datafixers.benchmarks.util
 * @since 1.0.0
 */
package de.splatgames.aether.datafixers.benchmarks;
 */

package de.splatgames.aether.datafixers.benchmarks.util;

import com.google.gson.JsonElement;
import de.splatgames.aether.datafixers.api.DataVersion;
import de.splatgames.aether.datafixers.api.TypeReference;
import de.splatgames.aether.datafixers.api.fix.DataFix;
import de.splatgames.aether.datafixers.api.fix.DataFixer;
import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
import de.splatgames.aether.datafixers.core.fix.DataFixerBuilder;
import de.splatgames.aether.datafixers.core.fix.noop.NoOpDataFixerContext;
import de.splatgames.aether.datafixers.testkit.factory.QuickFix;
import org.jetbrains.annotations.NotNull;

/**
 * Factory for pre-configured {@link DataFixer} instances optimized for benchmarking.
 *
 * <p>All fixers produced here use {@link NoOpDataFixerContext} so that logging does
 * not distort measurements, and {@link GsonOps} as the reference DynamicOps
 * implementation for consistency. Fix instances are created via the testkit's
 * {@link QuickFix} helpers.</p>
 *
 * <h2>Available configurations</h2>
 * <ul>
 *   <li>{@link #createSingleFixFixer()} &ndash; one rename fix; baseline
 *       single-operation performance</li>
 *   <li>{@link #createIdentityFixer()} &ndash; one no-op fix; pure framework
 *       overhead</li>
 *   <li>{@link #createChainFixer(int)} &ndash; 1&ndash;100 homogeneous rename fixes;
 *       chain-length scaling</li>
 *   <li>{@link #createMixedFixer(int)} &ndash; &ge;4 fixes cycling rename/add/remove/
 *       transform; realistic migrations</li>
 *   <li>{@link #createPlayerFixer()} &ndash; 4 realistic fixes over player-like
 *       data</li>
 * </ul>
 *
 * <p>Two type references categorize benchmark data: {@link #BENCHMARK_TYPE} for
 * generic payloads and {@link #PLAYER_TYPE} for player-like data.</p>
 *
 * <!-- NOTE(review): generic type parameters (e.g. DataFix<JsonElement>, hence the
 *      JsonElement import) appear to have been stripped during extraction of this
 *      file; confirm against the upstream sources. -->
 *
 * @author Erik Pförtner
 * @see BenchmarkDataGenerator
 * @see PayloadSize
 * @see de.splatgames.aether.datafixers.testkit.factory.QuickFix
 * @since 1.0.0
 */
public final class BenchmarkBootstrap {

    /**
     * Type reference for generic benchmark data; the default type used by most
     * benchmarks. The name "benchmark" is intentionally generic.
     */
    public static final TypeReference BENCHMARK_TYPE = new TypeReference("benchmark");

    /**
     * Type reference for player-like benchmark data, used by domain-specific
     * migration benchmarks.
     *
     * @see #createPlayerFixer()
     */
    public static final TypeReference PLAYER_TYPE = new TypeReference("player");

    /** Private constructor: utility class, never instantiated. */
    private BenchmarkBootstrap() {
        // Utility class
    }

    /**
     * Creates a DataFixer with a single field-rename fix (v1 &rarr; v2), renaming
     * {@code oldName} to {@code newName}. The simplest non-trivial configuration;
     * used as the baseline for single-operation measurements.
     *
     * @return a new DataFixer configured for single-fix benchmarks
     * @see #createIdentityFixer()
     */
    @NotNull
    public static DataFixer createSingleFixFixer() {
        return new DataFixerBuilder(new DataVersion(2))
                .withDefaultContext(NoOpDataFixerContext.INSTANCE)
                .addFix(BENCHMARK_TYPE, QuickFix.renameField(
                        GsonOps.INSTANCE,
                        "rename_field_v1_v2",
                        1, 2,
                        "oldName", "newName"))
                .build();
    }

    /**
     * Creates a DataFixer with an identity (pass-through) fix for v1 &rarr; v2.
     *
     * <p>Because the fix changes nothing, measurements against this fixer expose
     * pure framework overhead (version checks, fix selection, Dynamic wrapping).
     * Comparing it with {@link #createSingleFixFixer()} isolates the actual cost
     * of a field operation.</p>
     *
     * @return a new DataFixer with an identity fix
     * @see #createSingleFixFixer()
     */
    @NotNull
    public static DataFixer createIdentityFixer() {
        return new DataFixerBuilder(new DataVersion(2))
                .withDefaultContext(NoOpDataFixerContext.INSTANCE)
                .addFix(BENCHMARK_TYPE, QuickFix.identity("identity_v1_v2", 1, 2))
                .build();
    }

    /**
     * Creates a DataFixer with a chain of sequential homogeneous rename fixes:
     * v1 &rarr; v2 &rarr; ... &rarr; v(fixCount+1), each renaming
     * {@code field<i>} to {@code field<i+1>}. Used to measure how migration cost
     * scales with chain length (typical parameter values: 1, 5, 10, 25, 50, 100).
     *
     * @param fixCount the number of fixes in the chain (1..100 inclusive)
     * @return a new DataFixer with the requested number of sequential rename fixes
     * @throws IllegalArgumentException if fixCount is outside [1, 100]
     * @see #createMixedFixer(int)
     */
    @NotNull
    public static DataFixer createChainFixer(final int fixCount) {
        if (fixCount < 1 || fixCount > 100) {
            throw new IllegalArgumentException("fixCount must be between 1 and 100");
        }

        final DataFixerBuilder builder = new DataFixerBuilder(new DataVersion(fixCount + 1))
                .withDefaultContext(NoOpDataFixerContext.INSTANCE);

        for (int i = 0; i < fixCount; i++) {
            final int fromVersion = i + 1;
            final int toVersion = i + 2;
            final DataFix fix = QuickFix.renameField(
                    GsonOps.INSTANCE,
                    "rename_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion,
                    "field" + fromVersion, "field" + toVersion);
            builder.addFix(BENCHMARK_TYPE, fix);
        }

        return builder.build();
    }

    /**
     * Creates a DataFixer whose chain cycles through four heterogeneous fix types
     * (position mod 4: 0 = rename, 1 = add, 2 = remove, 3 = transform), giving a
     * more realistic migration workload than {@link #createChainFixer(int)}.
     * Version mapping: v1 &rarr; v2 &rarr; ... &rarr; v(fixCount+1).
     *
     * @param fixCount the number of fixes in the chain (at least 4 so every fix
     *                 type occurs at least once)
     * @return a new DataFixer with mixed fix types
     * @throws IllegalArgumentException if fixCount is less than 4
     * @see #createChainFixer(int)
     */
    @NotNull
    public static DataFixer createMixedFixer(final int fixCount) {
        if (fixCount < 4) {
            throw new IllegalArgumentException("fixCount must be at least 4 for mixed fixes");
        }

        final DataFixerBuilder builder = new DataFixerBuilder(new DataVersion(fixCount + 1))
                .withDefaultContext(NoOpDataFixerContext.INSTANCE);

        for (int i = 0; i < fixCount; i++) {
            final int fromVersion = i + 1;
            final int toVersion = i + 2;
            final DataFix fix = createMixedFix(fromVersion, toVersion, i % 4);
            builder.addFix(BENCHMARK_TYPE, fix);
        }

        return builder.build();
    }

    /**
     * Creates a DataFixer simulating a realistic player-data migration (v1 &rarr; v5):
     * rename {@code name} to {@code playerName}, add {@code score} (default 0),
     * double {@code level}, then remove {@code tempField}.
     *
     * <p>Pair with {@code BenchmarkDataGenerator.generatePlayerData(...)} for a
     * complete domain-specific migration scenario.</p>
     *
     * @return a new DataFixer configured for player data migrations (v1 &rarr; v5)
     * @see #PLAYER_TYPE
     */
    @NotNull
    public static DataFixer createPlayerFixer() {
        return new DataFixerBuilder(new DataVersion(5))
                .withDefaultContext(NoOpDataFixerContext.INSTANCE)
                .addFix(PLAYER_TYPE, QuickFix.renameField(
                        GsonOps.INSTANCE, "rename_name_v1_v2", 1, 2,
                        "name", "playerName"))
                .addFix(PLAYER_TYPE, QuickFix.addIntField(
                        GsonOps.INSTANCE, "add_score_v2_v3", 2, 3,
                        "score", 0))
                .addFix(PLAYER_TYPE, QuickFix.transformField(
                        GsonOps.INSTANCE, "double_level_v3_v4", 3, 4,
                        // Missing/unparsable level falls back to 1 before doubling.
                        "level", field -> field.createInt(
                                field.asInt().result().orElse(1) * 2)))
                .addFix(PLAYER_TYPE, QuickFix.removeField(
                        GsonOps.INSTANCE, "remove_temp_v4_v5", 4, 5,
                        "tempField"))
                .build();
    }

    /**
     * Internal factory used by {@link #createMixedFixer(int)} to create a fix of
     * the selected kind.
     *
     * @param fromVersion the source version for the fix
     * @param toVersion   the target version for the fix
     * @param fixType     the fix type selector (0=rename, 1=add, 2=remove, 3=transform)
     * @return a DataFix of the specified type
     */
    private static DataFix createMixedFix(final int fromVersion,
                                          final int toVersion,
                                          final int fixType) {
        return switch (fixType) {
            // NOTE(review): this "rename" maps "renamedField" onto itself. It looks
            // deliberate — the field then survives every 4-fix cycle so each rename
            // fix has something to operate on — but confirm it is the intended
            // rename workload rather than a copy/paste slip.
            case 0 -> QuickFix.renameField(
                    GsonOps.INSTANCE,
                    "rename_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion,
                    "renamedField", "renamedField");
            case 1 -> QuickFix.addStringField(
                    GsonOps.INSTANCE,
                    "add_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion,
                    "newField" + toVersion, "default");
            case 2 -> QuickFix.removeField(
                    GsonOps.INSTANCE,
                    "remove_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion,
                    "removedField" + fromVersion);
            case 3 -> QuickFix.transformField(
                    GsonOps.INSTANCE,
                    "transform_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion,
                    "transformedField",
                    field -> field.createString(
                            field.asString().result().orElse("") + "_transformed"));
            // Unreachable with i % 4, kept as a defensive default.
            default -> QuickFix.identity(
                    "identity_v" + fromVersion + "_v" + toVersion,
                    fromVersion, toVersion);
        };
    }
}
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.util; + +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.dynamic.DynamicOps; +import de.splatgames.aether.datafixers.testkit.TestData; +import de.splatgames.aether.datafixers.testkit.TestDataBuilder; +import org.jetbrains.annotations.NotNull; + +/** + * Factory for generating benchmark test data with configurable complexity. + * + *

This utility class creates {@link Dynamic} objects of varying sizes and + * structures for use in JMH benchmarks. Data generation is format-agnostic, working with any {@link DynamicOps} + * implementation.

+ * + *

Data Generation Methods

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
MethodStructureUse Case
{@link #generate(DynamicOps, PayloadSize)}Complex (fields + nesting + lists)General-purpose benchmarks
{@link #generatePlayerData(DynamicOps)}Domain-specific (player data)Realistic migration scenarios
{@link #generateFlat(DynamicOps, int)}Flat object (fields only)Basic operation benchmarks
+ * + *

Generated Data Structure

+ *

The main {@link #generate(DynamicOps, PayloadSize)} method creates objects with:

+ *
{@code
+ * {
+ *   "stringField0": "value0",
+ *   "intField0": 0,
+ *   "boolField0": true,
+ *   "stringField1": "value1",
+ *   ...
+ *   "nested": {
+ *     "level": 4,
+ *     "data": "nested-level-4",
+ *     "timestamp": 1234567890,
+ *     "child": {
+ *       "level": 3,
+ *       ...
+ *     }
+ *   },
+ *   "items": [
+ *     {"id": "item-0", "quantity": 1, "active": true},
+ *     {"id": "item-1", "quantity": 2, "active": false},
+ *     ...
+ *   ]
+ * }
+ * }
+ * + *

Design Considerations

+ *
    + *
  • Testkit integration: Uses {@link TestDataBuilder} for fluent, + * type-safe data construction
  • + *
  • Format agnostic: Works with any DynamicOps (Gson, Jackson, YAML, etc.)
  • + *
  • Deterministic: Generated data is fully reproducible for benchmark consistency
  • + *
  • Configurable complexity: {@link PayloadSize} controls data volume
  • + *
+ * + *

Usage Example

+ *
{@code
+ * // In a JMH benchmark
+ * @Setup(Level.Iteration)
+ * public void setup() {
+ *     // Generate medium-complexity test data
+ *     this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
+ *
+ *     // Or generate player-specific data
+ *     this.playerData = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ * }
+ * }
+ * + * @author Erik Pförtner + * @see PayloadSize + * @see BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.testkit.TestDataBuilder + * @since 1.0.0 + */ +public final class BenchmarkDataGenerator { + + /** + * Fixed timestamp value used for deterministic benchmark data generation. + * + *

Using a constant timestamp ensures reproducible benchmark results + * across different runs, eliminating variability from system time.

+ */ + private static final long FIXED_TIMESTAMP = 1704067200000L; // 2024-01-01 00:00:00 UTC + + /** + * Private constructor to prevent instantiation. + */ + private BenchmarkDataGenerator() { + // Utility class + } + + /** + * Generates benchmark data with the specified payload size and complexity. + * + *

Creates a complex object structure including:

+ *
    + *
  • Primitive fields: String, integer, and boolean fields based on + * {@link PayloadSize#getFieldCount()}
  • + *
  • Nested objects: Recursive nesting up to + * {@link PayloadSize#getNestingDepth()} levels
  • + *
  • List with items: An "items" array with + * {@link PayloadSize#getListSize()} objects
  • + *
+ * + *

Field Naming Patterns

+ * + * + * + * + * + *
Field TypePatternExample
String{@code stringFieldN}{@code stringField0: "value0"}
Integer{@code intFieldN}{@code intField0: 0}
Boolean{@code boolFieldN}{@code boolField0: true}
+ * + * @param ops the DynamicOps implementation to use for data creation + * @param size the payload size configuration controlling data complexity + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing the generated benchmark data + */ + @NotNull + public static Dynamic generate(@NotNull final DynamicOps ops, + @NotNull final PayloadSize size) { + final TestDataBuilder builder = TestData.using(ops).object(); + + // Add primitive fields + for (int i = 0; i < size.getFieldCount(); i++) { + builder.put("stringField" + i, "value" + i); + builder.put("intField" + i, i * 100); + builder.put("boolField" + i, i % 2 == 0); + } + + // Add nested objects + addNestedObject(builder, "nested", size.getNestingDepth()); + + // Add list with items + builder.putList("items", list -> { + for (int i = 0; i < size.getListSize(); i++) { + final int index = i; + list.addObject(item -> item + .put("id", "item-" + index) + .put("quantity", index + 1) + .put("active", index % 3 == 0)); + } + }); + + return builder.build(); + } + + /** + * Generates a player-like data structure for realistic migration benchmarks. + * + *

Creates a structure simulating game player data, useful for domain-specific + * migration testing with {@link BenchmarkBootstrap#createPlayerFixer()}.

+ * + *

Generated Structure

+ *
{@code
+     * {
+     *   "id": "player-benchmark-12345",
+     *   "name": "BenchmarkPlayer",
+     *   "level": 50,
+     *   "experience": 125000,
+     *   "health": 100.0,
+     *   "active": true,
+     *   "position": {"x": 100.5, "y": 64.0, "z": -200.25, "world": "overworld"},
+     *   "stats": {"strength": 15, "agility": 12, "intelligence": 18, "luck": 7},
+     *   "inventory": [{"slot": 0, "itemId": "minecraft:item_0", "count": 1, "damage": 0}, ...],
+     *   "achievements": ["first_login", "level_10", "level_25", "level_50", ...]
+     * }
+     * }
+ * + *

Data Characteristics

+ * + * + * + * + * + * + *
ComponentCountDescription
Top-level fields6id, name, level, experience, health, active
Nested objects2position (4 fields), stats (4 fields)
Inventory slots36Standard inventory size
Achievements6String list
+ * + * @param ops the DynamicOps implementation to use for data creation + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing player-like benchmark data + * @see BenchmarkBootstrap#createPlayerFixer() + * @see BenchmarkBootstrap#PLAYER_TYPE + */ + @NotNull + public static Dynamic generatePlayerData(@NotNull final DynamicOps ops) { + return TestData.using(ops) + .object() + .put("id", "player-benchmark-12345") + .put("name", "BenchmarkPlayer") + .put("level", 50) + .put("experience", 125000L) + .put("health", 100.0) + .put("active", true) + .putObject("position", pos -> pos + .put("x", 100.5) + .put("y", 64.0) + .put("z", -200.25) + .put("world", "overworld")) + .putObject("stats", stats -> stats + .put("strength", 15) + .put("agility", 12) + .put("intelligence", 18) + .put("luck", 7)) + .putList("inventory", inv -> { + for (int i = 0; i < 36; i++) { + final int slot = i; + inv.addObject(item -> item + .put("slot", slot) + .put("itemId", "minecraft:item_" + slot) + .put("count", (slot % 64) + 1) + .put("damage", 0)); + } + }) + .putList("achievements", list -> list + .add("first_login") + .add("level_10") + .add("level_25") + .add("level_50") + .add("explorer") + .add("master_crafter")) + .build(); + } + + /** + * Generates a simple flat object with only string fields. + * + *

Creates a minimal object structure without nesting or lists, useful for + * benchmarking basic field access and manipulation operations with minimal traversal overhead.

+ * + *

Generated Structure

+ *
{@code
+     * {
+     *   "field0": "value0",
+     *   "field1": "value1",
+     *   "field2": "value2",
+     *   ...
+     * }
+     * }
+ * + *

This method is useful for isolating field operation costs from + * structural complexity overhead.

+ * + * @param ops the DynamicOps implementation to use for data creation + * @param fieldCount the number of string fields to generate (field0 through field(n-1)) + * @param the underlying value type of the DynamicOps + * @return a new Dynamic containing a flat object with string fields + */ + @NotNull + public static Dynamic generateFlat(@NotNull final DynamicOps ops, + final int fieldCount) { + final TestDataBuilder builder = TestData.using(ops).object(); + for (int i = 0; i < fieldCount; i++) { + builder.put("field" + i, "value" + i); + } + return builder.build(); + } + + /** + * Recursively adds nested object structures to the builder. + * + *

Creates a chain of nested objects, each containing:

+ *
    + *
  • {@code level} - the current nesting depth
  • + *
  • {@code data} - a string identifying the nesting level
  • + *
  • {@code timestamp} - fixed timestamp for reproducibility
  • + *
  • {@code child} - the next nested level (if depth > 0)
  • + *
+ * + * @param builder the TestDataBuilder to add the nested structure to + * @param key the field name for this nested object + * @param depth remaining nesting levels (stops when depth reaches 0) + * @param the underlying value type of the builder + */ + private static void addNestedObject(final TestDataBuilder builder, + final String key, + final int depth) { + if (depth <= 0) { + return; + } + builder.putObject(key, nested -> { + nested.put("level", depth); + nested.put("data", "nested-level-" + depth); + nested.put("timestamp", FIXED_TIMESTAMP); + addNestedObject(nested, "child", depth - 1); + }); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java new file mode 100644 index 0000000..13b56f3 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.util; + +/** + * Defines payload size configurations for benchmark test data generation. + * + *

This enum controls the complexity of data generated by + * {@link BenchmarkDataGenerator#generate(de.splatgames.aether.datafixers.api.dynamic.DynamicOps, PayloadSize)}. + * Each configuration specifies three dimensions of data complexity:

+ * + *
    + *
  • Field count: Number of primitive fields (string, int, boolean triplets)
  • + *
  • Nesting depth: Levels of nested object recursion
  • + *
  • List size: Number of items in the generated list
  • + *
+ * + *

Configuration Summary

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
SizeFieldsNestingList ItemsUse Case
{@link #SMALL}52 levels10Quick iterations, CI pipelines
{@link #MEDIUM}204 levels100Typical performance testing
{@link #LARGE}506 levels1000Stress testing, worst-case analysis
+ * + *

JMH Parameterization

+ *

This enum is designed for use with JMH's {@code @Param} annotation:

+ *
{@code
+ * @Param({"SMALL", "MEDIUM", "LARGE"})
+ * private PayloadSize payloadSize;
+ *
+ * @Setup(Level.Iteration)
+ * public void setup() {
+ *     this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, payloadSize);
+ * }
+ * }
+ * + *

Memory and Performance Impact

+ *

Approximate data characteristics for each size:

+ * + * + * + * + * + *
Size~JSON Size~Object CountTypical Latency
SMALL~2 KB~50Sub-millisecond
MEDIUM~20 KB~500Low milliseconds
LARGE~200 KB~5000Tens of milliseconds
+ * + * @author Erik Pförtner + * @see BenchmarkDataGenerator + * @since 1.0.0 + */ +public enum PayloadSize { + + /** + * Small payload configuration for quick benchmark iterations. + * + *

Generates minimal data suitable for:

+ *
    + *
  • Rapid development feedback loops
  • + *
  • CI/CD pipeline validation
  • + *
  • Baseline measurements with minimal GC impact
  • + *
+ * + *

Configuration: 5 fields, 2 nesting levels, 10 list items

+ */ + SMALL(5, 2, 10), + + /** + * Medium payload configuration for balanced performance testing. + * + *

Generates moderately complex data suitable for:

+ *
    + *
  • Standard benchmark runs
  • + *
  • Typical real-world data volume simulation
  • + *
  • Comparing different implementations
  • + *
+ * + *

Configuration: 20 fields, 4 nesting levels, 100 list items

+ */ + MEDIUM(20, 4, 100), + + /** + * Large payload configuration for stress testing and worst-case analysis. + * + *

Generates substantial data suitable for:

+ *
    + *
  • Memory pressure and GC behavior analysis
  • + *
  • Worst-case performance scenarios
  • + *
  • Scalability limit identification
  • + *
+ * + *

Configuration: 50 fields, 6 nesting levels, 1000 list items

+ * + *

Note: Large payloads may require increased heap size and longer + * warmup periods for stable measurements.

+ */ + LARGE(50, 6, 1000); + + /** + * Number of primitive field triplets (string, int, boolean) to generate. + */ + private final int fieldCount; + + /** + * Maximum depth of nested object recursion. + */ + private final int nestingDepth; + + /** + * Number of items in the generated list. + */ + private final int listSize; + + /** + * Constructs a payload size configuration. + * + * @param fieldCount number of top-level field triplets + * @param nestingDepth maximum nesting levels for nested objects + * @param listSize number of items in generated lists + */ + PayloadSize(final int fieldCount, final int nestingDepth, final int listSize) { + this.fieldCount = fieldCount; + this.nestingDepth = nestingDepth; + this.listSize = listSize; + } + + /** + * Returns the number of primitive field triplets to generate. + * + *

Each field "count" results in three actual fields:

+ *
    + *
  • {@code stringFieldN} - String value
  • + *
  • {@code intFieldN} - Integer value
  • + *
  • {@code boolFieldN} - Boolean value
  • + *
+ * + * @return the number of field triplets (total fields = fieldCount × 3) + */ + public int getFieldCount() { + return this.fieldCount; + } + + /** + * Returns the maximum nesting depth for recursive nested objects. + * + *

A depth of N creates N levels of nested objects, each containing + * a "child" field pointing to the next level until depth reaches 0.

+ * + * @return the maximum nesting depth (0 = no nesting) + */ + public int getNestingDepth() { + return this.nestingDepth; + } + + /** + * Returns the number of items to generate in the "items" list. + * + *

Each item is an object with id, quantity, and active fields.

+ * + * @return the number of list items + */ + public int getListSize() { + return this.listSize; + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java new file mode 100644 index 0000000..5673ed4 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Utility classes for JMH benchmark infrastructure in the Aether DataFixers framework. + * + *

This package provides the foundational components that all benchmark classes depend on + * for test data generation, DataFixer configuration, and payload management. These utilities + * ensure consistent, reproducible benchmark conditions across different benchmark categories.

+ * + *

Package Contents

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
ClassPurposeUsed By
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap}Factory for pre-configured DataFixer instancesAll migration benchmarks
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator}Factory for generating test data with configurable complexityAll benchmarks requiring input data
{@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize}Configuration enum for data complexity levelsJMH {@code @Param} annotations
+ * + *

Design Principles

+ *
    + *
  • Isolation: Utilities are stateless and thread-safe for concurrent benchmark use
  • + *
  • Consistency: All benchmarks use the same data generation logic for fair comparisons
  • + *
  • Configurability: {@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} + * allows parameterized benchmarks with different data volumes
  • + *
  • No-op context: All DataFixers use {@code NoOpDataFixerContext} to eliminate + * logging overhead during measurements
  • + *
+ * + *

Typical Usage Pattern

+ *
{@code
+ * @State(Scope.Benchmark)
+ * public class MyBenchmark {
+ *
+ *     @Param({"SMALL", "MEDIUM", "LARGE"})
+ *     private PayloadSize payloadSize;
+ *
+ *     private DataFixer fixer;
+ *     private Dynamic input;
+ *
+ *     @Setup(Level.Trial)
+ *     public void setupTrial() {
+ *         // Create fixer once per trial
+ *         this.fixer = BenchmarkBootstrap.createChainFixer(10);
+ *     }
+ *
+ *     @Setup(Level.Iteration)
+ *     public void setupIteration() {
+ *         // Regenerate data each iteration for consistent GC behavior
+ *         this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, payloadSize);
+ *     }
+ *
+ *     @Benchmark
+ *     public void migrate(Blackhole blackhole) {
+ *         Dynamic result = fixer.update(
+ *             BenchmarkBootstrap.BENCHMARK_TYPE,
+ *             input,
+ *             new DataVersion(1),
+ *             new DataVersion(11)
+ *         );
+ *         blackhole.consume(result);
+ *     }
+ * }
+ * }
+ * + *

Data Fixer Configurations

+ *

{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} provides + * several DataFixer configurations for different benchmark scenarios:

+ * + * + * + * + * + * + * + *
ConfigurationFix CountPurpose
Single Fix1Baseline single-operation performance
Identity1 (no-op)Framework overhead measurement
Chain (N)1-100Chain length scaling analysis
Mixed (N)4+Realistic heterogeneous migrations
Player4Domain-specific scenario testing
+ * + *

Payload Size Configurations

+ *

{@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} defines three + * complexity levels for generated test data:

+ * + * + * + * + * + *
SizeFieldsNestingList ItemsUse Case
SMALL5210Quick iterations, CI
MEDIUM204100Standard testing
LARGE5061000Stress testing
+ * + *

Integration with Testkit

+ *

This package builds upon the {@code aether-datafixers-testkit} module:

+ *
    + *
  • {@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator} uses + * {@code TestDataBuilder} for fluent data construction
  • + *
  • {@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} uses + * {@code QuickFix} for efficient fix creation
  • + *
  • Both utilities leverage {@code MockSchemas} for lightweight schema instances
  • + *
+ * + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap + * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator + * @see de.splatgames.aether.datafixers.benchmarks.util.PayloadSize + * @see de.splatgames.aether.datafixers.testkit + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.util; diff --git a/pom.xml b/pom.xml index 62d1da8..ac74cb0 100644 --- a/pom.xml +++ b/pom.xml @@ -19,6 +19,7 @@ aether-datafixers-examples aether-datafixers-bom aether-datafixers-functional-tests + aether-datafixers-benchmarks @@ -67,6 +68,9 @@ 2.5 + + + 1.37 Aether Datafixers :: Parent @@ -213,6 +217,18 @@ ${spotbugs.version} provided + + + org.openjdk.jmh + jmh-core + ${jmh.version} + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + provided +