diff --git a/.gitignore b/.gitignore
index 5232b33..dcc6e4a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,3 +50,6 @@ bin/
# Claude Code
/.claude/
/CLAUDE.md
+
+# GitHub
+current-ticket.md
\ No newline at end of file
diff --git a/aether-datafixers-benchmarks/pom.xml b/aether-datafixers-benchmarks/pom.xml
new file mode 100644
index 0000000..ef8e7ba
--- /dev/null
+++ b/aether-datafixers-benchmarks/pom.xml
@@ -0,0 +1,174 @@
+
+
This class provides both a command-line interface and programmatic API for executing benchmarks. It supports all standard JMH options while providing convenient preset configurations for common benchmark scenarios.
 * Quick way to run benchmarks during development without building a JAR:
+ *{@code
+ * # Run all benchmarks with default settings
+ * mvn exec:java -pl aether-datafixers-benchmarks
+ *
+ * # Run with JMH arguments
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="-h"
+ *
+ * # Run specific benchmark pattern
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
+ * }
+ *
+ * Recommended for production benchmark runs with full JMH isolation:
+ *{@code
+ * # Build the fat JAR
+ * mvn clean package -pl aether-datafixers-benchmarks -DskipTests
+ *
+ * # Run all benchmarks
+ * java -jar aether-datafixers-benchmarks/target/aether-datafixers-benchmarks-*-benchmarks.jar
+ *
+ * # Run specific benchmark
+ * java -jar target/*-benchmarks.jar SingleFixBenchmark
+ *
+ * # Run with custom parameters
+ * java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 3 -i 5 -f 1
+ *
+ * # Output JSON results for analysis
+ * java -jar target/*-benchmarks.jar -rf json -rff results.json
+ *
+ * # List all available benchmarks
+ * java -jar target/*-benchmarks.jar -l
+ *
+ * # Profile with async-profiler
+ * java -jar target/*-benchmarks.jar -prof async:output=flamegraph
+ * }
+ *
+ * | Category | Benchmarks | Focus |
|---|---|---|
| Core | SingleFixBenchmark, MultiFixChainBenchmark, SchemaLookupBenchmark | DataFixer migration performance |
| Format | JsonBenchmark, YamlBenchmark, TomlXmlBenchmark, CrossFormatBenchmark | DynamicOps format comparisons |
| Codec | PrimitiveCodecBenchmark, CollectionCodecBenchmark | Serialization/deserialization |
| Concurrent | ConcurrentMigrationBenchmark | Thread-safety and scalability |
For integration with test frameworks or custom tooling:
+ *{@code
+ * // Run all benchmarks
+ * BenchmarkRunner.runAllBenchmarks();
+ *
+ * // Run quick validation (CI/CD)
+ * BenchmarkRunner.runQuickBenchmarks();
+ *
+ * // Run only core benchmarks
+ * BenchmarkRunner.runCoreBenchmarks();
+ *
+ * // Run only format benchmarks
+ * BenchmarkRunner.runFormatBenchmarks();
+ * }
+ *
+ * | Setting | Default | Quick Mode |
|---|---|---|
| Warmup iterations | 5 | 2 |
| Measurement iterations | 10 | 3 |
| Forks | 2 | 1 |
| JVM heap | 2 GB | 1 GB |
| Option | Description | Example |
|---|---|---|
| {@code -wi} | Warmup iterations | {@code -wi 3} |
| {@code -i} | Measurement iterations | {@code -i 5} |
| {@code -f} | Number of forks | {@code -f 1} |
| {@code -p} | Parameter value | {@code -p payloadSize=SMALL} |
| {@code -t} | Thread count | {@code -t 4} |
| {@code -rf} | Result format | {@code -rf json} |
| {@code -rff} | Result file | {@code -rff results.json} |
| {@code -l} | List benchmarks | {@code -l} |
| {@code -prof} | Profiler | {@code -prof gc} |
Behavior depends on whether arguments are provided:
 * Executes every benchmark class in {@code de.splatgames.aether.datafixers.benchmarks.*} with production-quality settings suitable for reliable performance measurements.
 * Note: Running all benchmarks can take significant time depending on the number of parameter combinations. Consider using {@link #runQuickBenchmarks()} for validation or {@link #runCoreBenchmarks()} for focused testing.
+ * + * @throws RunnerException if benchmark execution fails + * @see #runQuickBenchmarks() + * @see #runCoreBenchmarks() + */ + public static void runAllBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs a quick subset of benchmarks for fast validation. + * + *Executes only the {@code SingleFixBenchmark} with minimal iterations, + * suitable for:
+ *Warning: Results from quick benchmarks should not be used for + * performance comparisons due to reduced statistical rigor.
+ * + * @throws RunnerException if benchmark execution fails + * @see #runAllBenchmarks() + */ + public static void runQuickBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\.SingleFixBenchmark") + .warmupIterations(2) + .measurementIterations(3) + .forks(1) + .jvmArgs("-Xms1G", "-Xmx1G") + .param("payloadSize", "SMALL") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the core migration benchmarks. + * + *Executes benchmarks in the {@code core} package that measure DataFixer + * migration performance:
+ *Use this method when focusing on migration performance without + * format-specific or codec overhead considerations.
+ * + * @throws RunnerException if benchmark execution fails + * @see #runFormatBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runCoreBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } + + /** + * Runs only the format comparison benchmarks. + * + *Executes benchmarks in the {@code format} package that compare different + * DynamicOps implementations:
+ *Use this method when evaluating which DynamicOps implementation + * to use for a specific use case, or when optimizing format handling.
+ * + * @throws RunnerException if benchmark execution fails + * @see #runCoreBenchmarks() + * @see #runAllBenchmarks() + */ + public static void runFormatBenchmarks() throws RunnerException { + final Options options = new OptionsBuilder() + .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.format\\..*") + .warmupIterations(5) + .measurementIterations(10) + .forks(2) + .jvmArgs("-Xms2G", "-Xmx2G") + .build(); + + new Runner(options).run(); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java new file mode 100644 index 0000000..56405aa --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.codec; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.codec.Codec; +import de.splatgames.aether.datafixers.api.codec.Codecs; +import de.splatgames.aether.datafixers.api.result.DataResult; +import de.splatgames.aether.datafixers.api.util.Pair; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for collection codec encode/decode performance. + * + *Measures the performance of list codec operations with parameterized collection + * sizes. These benchmarks reveal how codec performance scales with data volume and + * help identify potential bottlenecks in collection traversal and element processing.
+ * + *Measure {@code List
Measure {@code List
| Parameter | Values | Description |
|---|---|---|
| listSize | 10, 100, 1000 | Number of elements in the test list |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 (for JIT variance mitigation) |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds (appropriate for collection operations) |
| Collection | Element Pattern | Example (size=3) |
|---|---|---|
| String List | {@code "item-" + index} | ["item-0", "item-1", "item-2"] |
| Integer List | {@code index} | [0, 1, 2] |
{@code
+ * # Run all collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run with specific list size
+ * java -jar benchmarks.jar CollectionCodecBenchmark -p listSize=1000
+ *
+ * # Run only string list benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.*String.*"
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.encode.*"
+ *
+ * # Compare direct vs functional round-trip
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.roundTrip.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar CollectionCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar CollectionCodecBenchmark -rf json -rff collection_results.json
+ * }
+ *
+ * @author Erik Pförtner
+ * @see PrimitiveCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs#list(Codec)
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class CollectionCodecBenchmark {
+
+ /**
+ * The number of elements in test lists, injected by JMH.
+ *
This parameter controls the size of both string and integer lists. Different sizes reveal scaling characteristics of the list codec:
+ *GsonOps is used as the reference JSON implementation for benchmarks.
+ */ + private GsonOps ops; + + /** + * Codec for encoding/decoding {@code ListCreated via {@link Codecs#list(Codec)} wrapping {@link Codecs#STRING}.
+ */ + private CodecCreated via {@link Codecs#list(Codec)} wrapping {@link Codecs#INT}.
+ */ + private CodecElements follow the pattern "item-0", "item-1", ..., "item-(n-1)".
+ */ + private ListElements are sequential integers: 0, 1, 2, ..., (n-1).
+ */ + private ListCreated during setup to isolate decode performance from encoding overhead.
+ */ + private JsonElement encodedStringList; + + /** + * Pre-encoded JSON array for integer list decode benchmarks. + * + *Created during setup to isolate decode performance from encoding overhead.
+ */ + private JsonElement encodedIntList; + + /** + * Initializes codecs, test data, and pre-encoded JSON elements. + * + *This setup method:
+ *Using {@link ArrayList} with pre-sized capacity avoids resizing overhead + * during population.
+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.stringListCodec = Codecs.list(Codecs.STRING); + this.intListCodec = Codecs.list(Codecs.INT); + + this.stringList = new ArrayList<>(this.listSize); + this.intList = new ArrayList<>(this.listSize); + + for (int i = 0; i < this.listSize; i++) { + this.stringList.add("item-" + i); + this.intList.add(i); + } + + this.encodedStringList = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + this.encodedIntList = this.intListCodec.encodeStart(this.ops, this.intList) + .result().orElseThrow(); + } + + // ==================== String List Benchmarks ==================== + + /** + * Benchmarks string list encoding to JSON array. + * + *Measures the performance of converting a {@code List
Performance factors:
+ *Measures the performance of extracting a {@code List
Performance factors:
+ *Measures the performance of converting a {@code List
Measures the performance of extracting a {@code List
Measures the combined performance of encoding a {@code List
This benchmark is useful for scenarios where data is temporarily serialized + * (e.g., caching, message passing) and immediately deserialized.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListDirect(final Blackhole blackhole) { + final JsonElement json = this.stringListCodec.encodeStart(this.ops, this.stringList) + .result().orElseThrow(); + final PairMeasures the combined performance of encoding a {@code List
Measures the combined performance of encoding and decoding using + * {@link DataResult#flatMap} for monadic composition. This represents + * the functional programming style where operations are chained without + * explicit result unwrapping.
+ * + *Comparing with {@link #roundTripStringListDirect} reveals the overhead + * (if any) of the functional API approach versus direct extraction.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringListFunctional(final Blackhole blackhole) { + final DataResultMeasures the combined performance of encoding and decoding using + * monadic composition via {@link DataResult#flatMap}.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntListFunctional(final Blackhole blackhole) { + final DataResultMeasures the baseline performance of the fundamental codec operations for + * primitive Java types. These benchmarks establish the lower bound for codec + * performance and help identify overhead introduced by more complex codec + * compositions.
+ * + *Measure Java value to JSON element conversion:
+ *Measure JSON element to Java value conversion:
+ *Measure complete encode-then-decode cycles:
+ *| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 (for JIT variance mitigation) |
| JVM Heap | 2 GB min/max |
| Time Unit | Nanoseconds (for fine-grained primitive ops) |
| Type | Value | Notes |
|---|---|---|
| boolean | {@code true} | Single bit representation |
| int | {@code 42} | Small positive integer |
| long | {@code 123456789L} | Value exceeding int range representation |
| float | {@code 3.14159f} | Pi approximation (tests decimal handling) |
| double | {@code 2.718281828} | Euler's number (tests precision) |
| String | {@code "benchmark-test-string"} | 21-character ASCII string |
{@code
+ * # Run all primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.encode.*"
+ *
+ * # Run only decode benchmarks
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.decode.*"
+ *
+ * # Compare specific types
+ * java -jar benchmarks.jar "PrimitiveCodecBenchmark.*(Int|Long).*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV for spreadsheet analysis
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark -rf csv -rff primitive_results.csv
+ * }
+ *
+ * @author Erik Pförtner
+ * @see CollectionCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class PrimitiveCodecBenchmark {
+
+ /**
+ * Test boolean value for encoding benchmarks.
+ */
+ private static final boolean TEST_BOOL = true;
+
+ /**
+ * Test integer value for encoding benchmarks.
+ *
+ * A small positive integer that fits in a single JSON number token.
+ */ + private static final int TEST_INT = 42; + + /** + * Test long value for encoding benchmarks. + * + *A value that exceeds typical int range to test long-specific handling.
+ */ + private static final long TEST_LONG = 123456789L; + + /** + * Test float value for encoding benchmarks. + * + *Pi approximation to test decimal point handling and precision.
+ */ + private static final float TEST_FLOAT = 3.14159f; + + /** + * Test double value for encoding benchmarks. + * + *Euler's number with extended precision to test double encoding accuracy.
+ */ + private static final double TEST_DOUBLE = 2.718281828; + + /** + * Test string value for encoding benchmarks. + * + *A 21-character ASCII string representing typical field values.
+ */ + private static final String TEST_STRING = "benchmark-test-string"; + + /** + * The DynamicOps implementation used for all codec operations. + * + *GsonOps is used as the reference implementation for JSON format benchmarks.
+ */ + private GsonOps ops; + + /** + * Pre-encoded boolean JSON element for decode benchmarks. + */ + private JsonElement encodedBool; + + /** + * Pre-encoded integer JSON element for decode benchmarks. + */ + private JsonElement encodedInt; + + /** + * Pre-encoded long JSON element for decode benchmarks. + */ + private JsonElement encodedLong; + + /** + * Pre-encoded float JSON element for decode benchmarks. + */ + private JsonElement encodedFloat; + + /** + * Pre-encoded double JSON element for decode benchmarks. + */ + private JsonElement encodedDouble; + + /** + * Pre-encoded string JSON element for decode benchmarks. + */ + private JsonElement encodedString; + + /** + * Initializes pre-encoded JSON elements for decode benchmarks. + * + *Pre-encoding ensures decode benchmarks measure only decoding performance + * without encoding overhead. All test values are encoded once at trial start.
+ */ + @Setup(Level.Trial) + public void setup() { + this.ops = GsonOps.INSTANCE; + + this.encodedBool = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL).result().orElseThrow(); + this.encodedInt = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + this.encodedLong = Codecs.LONG.encodeStart(this.ops, TEST_LONG).result().orElseThrow(); + this.encodedFloat = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT).result().orElseThrow(); + this.encodedDouble = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE).result().orElseThrow(); + this.encodedString = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + } + + // ==================== Boolean Benchmarks ==================== + + /** + * Benchmarks boolean value encoding to JSON. + * + *Measures the performance of converting a Java {@code boolean} to a + * JSON boolean element via {@link Codecs#BOOL}.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeBool(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code Boolean} from a + * pre-encoded JSON boolean element.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeBool(final Blackhole blackhole) { + final DataResultMeasures the performance of converting a Java {@code int} to a + * JSON number element via {@link Codecs#INT}.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeInt(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code Integer} from a + * pre-encoded JSON number element.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeInt(final Blackhole blackhole) { + final DataResultMeasures the performance of converting a Java {@code long} to a + * JSON number element via {@link Codecs#LONG}.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeLong(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code Long} from a + * pre-encoded JSON number element.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeLong(final Blackhole blackhole) { + final DataResultMeasures the performance of converting a Java {@code float} to a + * JSON number element via {@link Codecs#FLOAT}. Float encoding involves + * decimal representation which may differ from integer encoding.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeFloat(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code Float} from a + * pre-encoded JSON number element. Decoding involves parsing the decimal + * representation back to IEEE 754 single-precision format.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeFloat(final Blackhole blackhole) { + final DataResultMeasures the performance of converting a Java {@code double} to a + * JSON number element via {@link Codecs#DOUBLE}. Double encoding preserves + * higher precision than float but uses similar mechanisms.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeDouble(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code Double} from a + * pre-encoded JSON number element.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeDouble(final Blackhole blackhole) { + final DataResultMeasures the performance of converting a Java {@code String} to a + * JSON string element via {@link Codecs#STRING}. String encoding may involve + * escape sequence handling for special characters.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void encodeString(final Blackhole blackhole) { + final DataResultMeasures the performance of extracting a Java {@code String} from a + * pre-encoded JSON string element.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void decodeString(final Blackhole blackhole) { + final DataResultMeasures the combined performance of encoding a Java {@code int} to JSON + * and immediately decoding it back. Uses direct result extraction via + * {@code result().orElseThrow()} to measure the typical non-functional usage pattern.
+ * + *Round-trip performance is important for scenarios where data is temporarily + * serialized (e.g., caching, IPC) and immediately deserialized.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripIntDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow(); + final PairMeasures the combined performance of encoding a Java {@code String} to JSON + * and immediately decoding it back. String round-trips may involve additional + * overhead from string object creation compared to primitive numeric types.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void roundTripStringDirect(final Blackhole blackhole) { + final JsonElement json = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow(); + final PairThis package contains benchmarks that measure the performance of codec operations, + * including encoding (Java objects to serialized format) and decoding (serialized format + * to Java objects). These benchmarks establish baseline performance for the codec system + * and help identify bottlenecks in serialization pipelines.
+ * + *| Class | + *Focus Area | + *Key Metrics | + *
|---|---|---|
| {@link de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark} | + *Primitive type codecs (bool, int, long, float, double, string) | + *Baseline encode/decode latency, round-trip overhead | + *
| {@link de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark} | + *Collection codecs (List<String>, List<Integer>) | + *Scaling with collection size, functional vs direct API overhead | + *
Codecs are fundamental to the DataFixer system, transforming data between typed + * Java objects and format-agnostic {@link de.splatgames.aether.datafixers.api.dynamic.Dynamic} + * representations. Understanding codec performance is essential for:
+ *{@code
+ * # Run all codec benchmarks
+ * java -jar benchmarks.jar ".*codec.*"
+ *
+ * # Run only primitive codec benchmarks
+ * java -jar benchmarks.jar PrimitiveCodecBenchmark
+ *
+ * # Run only collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run encode-only benchmarks across all codec types
+ * java -jar benchmarks.jar ".*codec.*encode.*"
+ *
+ * # Run decode-only benchmarks
+ * java -jar benchmarks.jar ".*codec.*decode.*"
+ *
+ * # Run round-trip benchmarks
+ * java -jar benchmarks.jar ".*codec.*roundTrip.*"
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*codec.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate CSV report for analysis
+ * java -jar benchmarks.jar ".*codec.*" -rf csv -rff codec_results.csv
+ * }
+ *
+ * | Observation | Meaning | Action |
|---|---|---|
| Encode slower than decode | + *JSON element construction more expensive than extraction | + *Consider caching encoded results if reused | + *
| Decode slower than encode | + *Type parsing/validation overhead dominates | + *Review type conversion logic | + *
| Super-linear collection scaling | + *GC pressure or algorithmic inefficiency | + *Profile memory allocation; consider streaming | + *
| Functional API slower than direct | + *Lambda/closure overhead measurable | + *Use direct extraction for hot paths | + *
| String codec slower than numeric | + *String allocation/interning overhead | + *Expected; no action needed | + *
Codec benchmarks complement other benchmark packages:
+ *These benchmarks use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps} + * as the reference DynamicOps implementation. The codec system supports multiple formats:
+ *Future benchmarks may compare performance across different DynamicOps implementations.
+ * + * @see de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark + * @see de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark + * @see de.splatgames.aether.datafixers.api.codec.Codec + * @see de.splatgames.aether.datafixers.api.codec.Codecs + * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps + * @since 1.0.0 + */ +package de.splatgames.aether.datafixers.benchmarks.codec; diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java new file mode 100644 index 0000000..a1830bf --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package de.splatgames.aether.datafixers.benchmarks.concurrent; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.api.schema.Schema; +import de.splatgames.aether.datafixers.api.schema.SchemaRegistry; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry; +import de.splatgames.aether.datafixers.testkit.factory.MockSchemas; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.SplittableRandom; +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for concurrent DataFixer operations and thread-safety validation. 
+ * + *This benchmark measures the performance characteristics of the DataFixer system + * under concurrent load. It validates thread-safety of shared components and quantifies + * scalability across different thread counts. The results help identify contention + * points and ensure the framework performs well in multi-threaded environments.
+ * + *Measure DataFixer performance when multiple threads perform migrations simultaneously:
+ *Measure SchemaRegistry performance under concurrent read pressure:
+ *| Benchmark | Threads | Purpose |
|---|---|---|
| concurrentSingleFix | MAX (all available) | Maximum contention stress test |
| concurrentChainMigration | MAX | Chain migration under full load |
| fourThreadMigration | 4 | Typical server scenario baseline |
| eightThreadMigration | 8 | Higher parallelism scaling point |
| concurrentRegistryLookup | MAX | Registry contention stress test |
| concurrentLatestLookup | MAX | Hot path contention analysis |
| Parameter | Values | Description |
|---|---|---|
| payloadSize | SMALL, MEDIUM | Input data complexity per thread |
| Setting | Value |
|---|---|
| Warmup | 3 iterations, 2 seconds each |
| Measurement | 5 iterations, 2 seconds each |
| Forks | 2 (for JIT variance mitigation) |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
This benchmark uses two JMH state classes to properly isolate shared and + * thread-local data:
+ *{@code
+ * # Run all concurrent benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count override
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -t 16
+ *
+ * # Run registry-only benchmarks
+ * java -jar benchmarks.jar ".*concurrent.*Lookup.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -rf json -rff concurrent_results.json
+ *
+ * # Profile with async-profiler integration
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -prof async:output=flamegraph
+ * }
+ *
+ * @author Erik Pförtner
+ * @see de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark
+ * @see BenchmarkBootstrap
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class ConcurrentMigrationBenchmark {
+
+ // ==================== Concurrent Migration Benchmarks ====================
+
+ /**
+ * Benchmarks concurrent single-fix migrations with maximum thread parallelism.
+ *
+ * All available CPU threads simultaneously apply a single DataFix to their + * respective input data. This benchmark stress-tests the thread-safety of the + * DataFixer implementation and measures maximum achievable throughput.
+ * + *Key aspects measured:
+ *All available CPU threads simultaneously apply a 10-fix chain migration. + * This benchmark combines the stress of concurrent access with the complexity + * of multi-step migrations, revealing performance characteristics under + * realistic high-load scenarios.
+ * + *Compared to {@link #concurrentSingleFix}, this benchmark:
+ *Provides a fixed-thread baseline for comparing against variable-thread + * benchmarks. Four threads represent a typical server core count and help + * establish scaling characteristics between single-threaded and maximum + * parallelism scenarios.
+ * + *Use this benchmark to:
+ *Tests scaling beyond the 4-thread baseline. Eight threads represent + * a common server configuration and help identify whether the DataFixer + * implementation scales efficiently with additional parallelism.
+ * + *Scaling analysis:
+ *All available threads perform random version lookups against a shared + * {@link SchemaRegistry} containing 100 schema versions. This benchmark + * validates the thread-safety and performance of registry read operations + * under heavy concurrent access.
+ * + *The benchmark uses pre-computed random indices (via {@link ThreadState#nextRegistryIndex()}) + * to avoid RNG contention affecting measurements. Each thread cycles through + * a 1024-element buffer of random indices.
+ * + *Performance expectations:
+ *All available threads repeatedly call {@link SchemaRegistry#latest()} + * on a shared registry. This represents the "hot path" optimization where + * applications frequently need the most recent schema version.
+ * + *This benchmark helps validate:
+ *Expected to outperform {@link #concurrentRegistryLookup} due to:
+ *This state class contains all resources that are shared across benchmark + * threads, simulating real-world scenarios where a single DataFixer instance + * serves multiple concurrent requests.
+ * + *State initialization occurs once per trial (before warmup begins) to + * ensure consistent starting conditions across all measurement iterations.
+ * + *Controls the complexity of generated test data for each thread. + * Only SMALL and MEDIUM sizes are used to balance benchmark runtime + * with meaningful performance differentiation.
+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + public PayloadSize payloadSize; + + /** + * Shared DataFixer configured with a single fix (v1 → v2). + * + *Used by migration benchmarks that measure basic concurrent + * fix application without chain traversal overhead.
+ */ + public DataFixer sharedFixer; + + /** + * Shared DataFixer configured with a 10-fix chain (v1 → v11). + * + *Used by {@link #concurrentChainMigration} to measure concurrent + * performance when applying multiple sequential fixes.
+ */ + public DataFixer sharedChainFixer; + + /** + * Shared SchemaRegistry containing 100 schema versions. + * + *The registry is frozen after population to ensure thread-safe + * read access during benchmarks. Versions range from 10 to 1000 + * in increments of 10.
+ */ + public SchemaRegistry sharedRegistry; + + /** + * Source version for all migrations (v1). + */ + public DataVersion fromVersion; + + /** + * Target version for single-fix migrations (v2). + */ + public DataVersion toVersion; + + /** + * Target version for chain migrations (v11). + */ + public DataVersion chainToVersion; + + /** + * Pre-computed DataVersion array for registry lookup benchmarks. + * + *Contains 100 versions (10, 20, 30, ..., 1000) matching the + * schemas registered in {@link #sharedRegistry}. Pre-allocation + * avoids DataVersion object creation during measurement.
+ */ + public DataVersion[] registryVersions; + + /** + * Initializes all shared benchmark state. + * + *Creates DataFixer instances, populates the SchemaRegistry with + * 100 versions, and pre-computes all version constants. The registry + * is frozen after population to enable lock-free concurrent reads.
+ */ + @Setup(Level.Trial) + public void setup() { + this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer(); + this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10); + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + this.chainToVersion = new DataVersion(11); + + final SimpleSchemaRegistry registry = new SimpleSchemaRegistry(); + this.registryVersions = new DataVersion[100]; + for (int i = 0; i < 100; i++) { + final int version = (i + 1) * 10; + this.registryVersions[i] = new DataVersion(version); + registry.register(MockSchemas.minimal(version)); + } + registry.freeze(); + this.sharedRegistry = registry; + } + } + + /** + * Per-thread benchmark state for isolated data and random access patterns. + * + *This state class provides each benchmark thread with its own input data + * and random number generator to eliminate false sharing and contention on + * thread-local operations.
+ * + *The {@link #registryIndexBuffer} uses a power-of-two size (1024) with + * bitwise AND masking for efficient wraparound without modulo operations. + * This provides pseudo-random access patterns while minimizing measurement + * overhead.
+ */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-computed random index buffer. + * + *Power of two (1024) enables efficient wraparound via bitwise AND. + * Large enough to avoid pattern repetition affecting cache behavior + * during typical measurement windows.
+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bitmask for efficient modulo operation on buffer index. + * + *Used as {@code cursor & INDEX_MASK} instead of {@code cursor % INDEX_BUFFER_SIZE} + * for faster wraparound calculation.
+ */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-computed random indices for registry lookup benchmarks. + * + *Populated during iteration setup with random values in range + * [0, registryVersions.length). Accessed via {@link #nextRegistryIndex()}.
+ */ + private final int[] registryIndexBuffer = new int[INDEX_BUFFER_SIZE]; + + /** + * Per-thread input data for migration benchmarks. + * + *Regenerated at each iteration to ensure consistent memory allocation + * patterns and prevent cross-iteration caching effects.
+ */ + public DynamicIncremented on each call to {@link #nextRegistryIndex()} and + * wrapped using {@link #INDEX_MASK}.
+ */ + private int registryCursor; + + /** + * Per-thread random number generator. + * + *{@link SplittableRandom} is used instead of {@link java.util.Random} + * because it is faster and does not use atomic operations, eliminating + * contention when multiple threads generate random numbers.
+ */ + private SplittableRandom random; + + /** + * Initializes the per-thread random number generator. + * + * <p>Called once per trial. Uses a fixed seed (42) for reproducibility + * across benchmark runs. Note that every thread is seeded identically and + * the generator is never {@link SplittableRandom#split() split}, so all + * threads produce the same index sequence.</p>
+ */ + @Setup(Level.Trial) + public void setupTrial() { + // Per-thread RNG avoids contention and is faster than java.util.Random. + this.random = new SplittableRandom(42L); + } + + /** + * Regenerates input data and random indices for each iteration. + * + *Fresh data generation per iteration ensures:
+ *The random index buffer is refilled with new random values to + * vary the registry access pattern across iterations.
+ * + * @param s the shared benchmark state providing payload size and version array + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, s.payloadSize); + + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.registryIndexBuffer[i] = this.random.nextInt(s.registryVersions.length); + } + this.registryCursor = 0; + } + + /** + * Returns the next pre-computed random index for registry lookups. + * + *Retrieves the next value from {@link #registryIndexBuffer} and + * advances the cursor with efficient bitwise wraparound. This method + * is called during measurement and is optimized to minimize overhead.
+ * + * @return a random index in range [0, registryVersions.length) + */ + public int nextRegistryIndex() { + return this.registryIndexBuffer[this.registryCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java new file mode 100644 index 0000000..9b374ee --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Concurrency-focused JMH benchmarks for the Aether DataFixers framework. + * + *This package contains benchmarks that measure performance characteristics under + * concurrent load. 
These benchmarks validate thread-safety of the DataFixer system, + * identify contention points, and quantify scalability across different thread counts.
+ * + *| Class | + *Focus Area | + *Key Metrics | + *
|---|---|---|
| {@link de.splatgames.aether.datafixers.benchmarks.concurrent.ConcurrentMigrationBenchmark} | + *Multi-threaded migration and registry access | + *Throughput scaling, contention overhead, thread-safety validation | + *
Single-threaded benchmarks measure raw operation performance, but real-world + * applications often use the DataFixer system from multiple threads simultaneously. + * Concurrent benchmarks reveal:
+ *{@code
+ * # Run all concurrent benchmarks with maximum threads
+ * java -jar benchmarks.jar ".*concurrent.*"
+ *
+ * # Run with specific thread count
+ * java -jar benchmarks.jar ".*concurrent.*" -t 8
+ *
+ * # Quick validation with reduced iterations
+ * java -jar benchmarks.jar ".*concurrent.*" -wi 1 -i 1 -f 1
+ *
+ * # Generate detailed JSON report
+ * java -jar benchmarks.jar ".*concurrent.*" -rf json -rff concurrent_results.json
+ *
+ * # Profile lock contention with JFR
+ * java -jar benchmarks.jar ".*concurrent.*" -prof jfr
+ * }
+ *
+ * Concurrent benchmark results require careful interpretation:
+ *| Pattern | Meaning | Action |
|---|---|---|
| Linear throughput scaling | + *No contention; excellent parallelism | + *None needed | + *
| Sub-linear scaling | + *Some contention; typical for shared resources | + *Acceptable; monitor for degradation | + *
| Throughput plateau | + *Saturation point reached | + *Identify bottleneck (CPU, memory, locks) | + *
| Throughput degradation | + *Severe contention; adding threads hurts | + *Investigate locking; consider lock-free alternatives | + *
| High variance (± error) | + *GC pauses, lock contention, or scheduling | + *Profile with async-profiler or JFR | + *
The {@link de.splatgames.aether.datafixers.benchmarks.core core} package + * measures single-threaded baseline performance. Use concurrent benchmarks to:
+ *Measures how fix chain length affects migration performance. This benchmark + * is essential for understanding the scalability characteristics of the DataFixer + * system when applying multiple sequential fixes.
+ * + *| Parameter | Values | Description |
|---|---|---|
| fixCount | 1, 5, 10, 25, 50 | Number of fixes in the chain |
| payloadSize | SMALL, MEDIUM | Input data complexity |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 (for statistical significance) |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar MultiFixChainBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar MultiFixChainBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific fix count and payload size
+ * java -jar benchmarks.jar MultiFixChainBenchmark -p fixCount=10 -p payloadSize=SMALL
+ *
+ * # Generate CSV output for analysis
+ * java -jar benchmarks.jar MultiFixChainBenchmark -rf csv -rff chain_results.csv
+ * }
+ *
+ * @author Erik Pförtner
+ * @see SingleFixBenchmark
+ * @see BenchmarkBootstrap#createChainFixer(int)
+ * @see BenchmarkBootstrap#createMixedFixer(int)
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class MultiFixChainBenchmark {
+
+ /**
+ * The number of fixes in the chain, injected by JMH.
+ *
+ * This parameter controls the length of the fix chain being benchmarked. + * Higher values test the system's ability to handle long migration paths + * efficiently.
+ * + *Controls the complexity of generated test data. Only SMALL and MEDIUM + * sizes are used to keep benchmark runtime reasonable while still capturing + * scaling behavior.
+ * + * @see PayloadSize + */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * DataFixer configured with a chain of homogeneous field rename fixes. + * + *Each fix in the chain performs a simple field rename operation (v{@code n} → v{@code n+1}). + * This represents the best-case scenario for chain execution.
+ */ + private DataFixer chainFixer; + + /** + * DataFixer configured with a chain of heterogeneous fix operations. + * + *The chain includes a mix of rename, add, and transform operations to + * simulate realistic migration scenarios. Falls back to {@link #chainFixer} + * if mixed fixer creation fails.
+ */ + private DataFixer mixedFixer; + + /** + * Pre-generated input data matching {@link #payloadSize}. + * + *Regenerated at each iteration to ensure consistent GC behavior + * and avoid caching effects.
+ */ + private DynamicUsed by {@link #partialChain} to measure performance when only part + * of the available fixes are applied.
+ */ + private DataVersion halfwayToVersion; + + /** + * Initializes the benchmark state once per trial. + * + *Creates the chain and mixed fixers based on the current {@link #fixCount} + * parameter. Also calculates the version bounds for full and partial chain + * execution.
+ * + *If mixed fixer creation fails (e.g., due to unsupported operations), + * the chain fixer is used as a fallback to ensure the benchmark can still run.
+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.chainFixer = BenchmarkBootstrap.createChainFixer(this.fixCount); + + try { + this.mixedFixer = BenchmarkBootstrap.createMixedFixer(this.fixCount); + } catch (final RuntimeException ex) { + this.mixedFixer = this.chainFixer; + } + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(this.fixCount + 1); + + final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1); + this.halfwayToVersion = new DataVersion(halfwayVersion); + } + + /** + * Regenerates input data at each iteration. + * + *Fresh data generation per iteration ensures that:
+ *Measures the performance of applying {@link #fixCount} sequential rename + * fixes to migrate data from v1 to v{@code fixCount+1}. This represents an + * optimistic scenario where all fixes perform the same lightweight operation.
+ * + *Use this benchmark to establish baseline chain performance and detect + * any non-linear scaling behavior in the fix application pipeline.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void renameChain(final Blackhole blackhole) { + final DynamicMeasures the performance of applying {@link #fixCount} sequential fixes + * that include a mix of operations:
+ *This benchmark provides a more realistic performance profile compared + * to {@link #renameChain}, as real-world migrations typically involve + * diverse operations.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void mixedChain(final Blackhole blackhole) { + final DynamicMeasures the performance of applying only half of the available fixes + * in the chain. This simulates scenarios where:
+ *Comparing this benchmark with {@link #renameChain} reveals whether + * fix selection and version range calculations add significant overhead.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void partialChain(final Blackhole blackhole) { + final DynamicMeasures the overhead of various schema lookup operations as registry size grows. + * Schema lookups are performed frequently during data migration, so their performance directly impacts overall + * migration throughput.
+ * + *| Parameter | Values | Description |
|---|---|---|
| schemaCount | 10, 50, 100, 500 | Number of schemas in the registry |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 (for statistical significance) |
| JVM Heap | 2 GB min/max |
| Time Unit | Nanoseconds |
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SchemaLookupBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific schema count only
+ * java -jar benchmarks.jar SchemaLookupBenchmark -p schemaCount=100
+ *
+ * # Run specific lookup benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark.exactLookup
+ * }
+ *
+ * @author Erik Pförtner
+ * @see SchemaRegistry
+ * @see SimpleSchemaRegistry
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class SchemaLookupBenchmark {
+
+ /**
+ * Benchmarks exact version lookup performance.
+ *
+ * Measures the time to retrieve a schema by its exact registered version. + * This is the most common lookup pattern during migration when the source version is known precisely.
+ * + *The benchmark uses pre-generated random indices to avoid RNG overhead + * in the measurement loop. Each invocation looks up a different random version to prevent branch prediction + * optimization.
+ * + * @param s the shared benchmark state containing the registry and versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void exactLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextExactIndex(); + final Schema schema = s.registry.get(s.versions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks floor lookup performance. + * + *Measures the time to retrieve a schema using floor semantics, where + * the registry returns the schema with the highest version ≤ the requested version. This pattern is used when + * data may be at intermediate versions not explicitly registered.
+ * + *The lookup versions include both exact matches (10, 20, 30, ...) and + * in-between values (5, 15, 25, ...) to exercise both fast-path exact matches and slower floor searches.
+ * + * @param s the shared benchmark state containing the registry and lookup versions + * @param t the per-thread state providing random lookup indices + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void floorLookup(final BenchmarkState s, + final ThreadState t, + final Blackhole blackhole) { + final int index = t.nextFloorIndex(); + final Schema schema = s.registry.get(s.lookupVersions[index]); + blackhole.consume(schema); + } + + /** + * Benchmarks latest schema retrieval performance. + * + *Measures the time to retrieve the most recent schema from the registry. + * This operation should be O(1) as the latest schema is typically cached or stored in a dedicated field.
+ * + *This benchmark serves as a baseline for the fastest possible lookup + * operation and helps identify any unexpected overhead in the registry implementation.
+ * + * @param s the shared benchmark state containing the registry + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void latestLookup(final BenchmarkState s, + final Blackhole blackhole) { + final Schema schema = s.registry.latest(); + blackhole.consume(schema); + } + + /** + * Benchmarks sequential lookup of all registered schemas. + * + *Measures the aggregate time to look up every schema in the registry + * in version order. This pattern occurs during schema validation, debugging, or when building migration path + * analyses.
+ * + *Note: This benchmark performs multiple lookups per invocation + * ({@code schemaCount} lookups). The reported time is for the entire sequence, not per-lookup. Divide by + * {@code schemaCount} to get per-lookup overhead.
+ * + * @param s the shared benchmark state containing the registry and versions + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void sequentialLookup(final BenchmarkState s, + final Blackhole blackhole) { + for (final DataVersion version : s.versions) { + final Schema schema = s.registry.get(version); + blackhole.consume(schema); + } + } + + /** + * Shared JMH state containing the schema registry and version arrays. + * + *This state is shared across all threads within a benchmark trial + * ({@link Scope#Benchmark}). The registry is populated with mock schemas at versions 10, 20, 30, ... up to + * {@code schemaCount * 10}.
+ * + *The registry is frozen after setup to match production usage patterns + * where registries are immutable during normal operation.
+ */ + @State(Scope.Benchmark) + public static class BenchmarkState { + + /** + * The number of schemas to register, injected by JMH. + * + *Controls the size of the schema registry to measure lookup + * performance scaling:
+ *Used by {@link #exactLookup} to ensure lookups always hit + * registered versions.
+ */ + public DataVersion[] versions; + + /** + * Array of lookup versions including in-between values (5, 10, 15, 20, ...). + * + *Used by {@link #floorLookup} to exercise both exact matches + * and floor search behavior.
+ */ + public DataVersion[] lookupVersions; + + /** + * Initializes the schema registry and version arrays once per trial. + * + *Creates a {@link SimpleSchemaRegistry} populated with minimal mock + * schemas at regular version intervals. The registry is frozen after population to enable any internal + * optimizations.
+ */ + @Setup(Level.Trial) + public void setup() { + final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry(); + this.versions = new DataVersion[this.schemaCount]; + + for (int i = 0; i < this.schemaCount; i++) { + final int version = (i + 1) * 10; + final DataVersion dataVersion = new DataVersion(version); + this.versions[i] = dataVersion; + simpleRegistry.register(MockSchemas.minimal(version)); + } + + simpleRegistry.freeze(); + this.registry = simpleRegistry; + + this.lookupVersions = new DataVersion[this.schemaCount * 2]; + for (int i = 0; i < this.lookupVersions.length; i++) { + this.lookupVersions[i] = new DataVersion((i + 1) * 5); + } + } + } + + /** + * Per-thread JMH state providing pre-generated random lookup indices. + * + *Random number generation is expensive and would dominate the benchmark + * if performed in the hot path. This state pre-generates buffers of random indices during setup, allowing the + * benchmark methods to retrieve indices via simple array access and bit masking.
+ * + *Each thread has its own state instance ({@link Scope#Thread}) to avoid + * contention on shared RNG state. The fixed seed ensures reproducible results across benchmark runs.
+ * + * @see BenchmarkState + */ + @State(Scope.Thread) + public static class ThreadState { + + /** + * Size of the pre-generated index buffer. + * + *Power-of-two size enables cheap index wrapping via bit masking + * instead of modulo operation.
+ */ + private static final int INDEX_BUFFER_SIZE = 1024; + + /** + * Bit mask for wrapping cursor to buffer bounds ({@code INDEX_BUFFER_SIZE - 1}). + */ + private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1; + + /** + * Pre-generated indices into {@link BenchmarkState#versions}. + */ + private final int[] exactIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Pre-generated indices into {@link BenchmarkState#lookupVersions}. + */ + private final int[] floorIndices = new int[INDEX_BUFFER_SIZE]; + + /** + * Current position in {@link #exactIndices}. + */ + private int exactCursor; + + /** + * Current position in {@link #floorIndices}. + */ + private int floorCursor; + + /** + * Thread-local random number generator for index generation. + */ + private SplittableRandom random; + + /** + * Initializes the random number generator once per trial. + * + *Uses a fixed seed (42) for reproducibility. Each thread gets its + * own {@link SplittableRandom} instance to avoid synchronization overhead.
+ */ + @Setup(Level.Trial) + public void setupTrial() { + this.random = new SplittableRandom(42L); + } + + /** + * Refills the index buffers at each iteration. + * + *Generates fresh random indices based on the current + * {@link BenchmarkState#schemaCount} parameter. Resets cursors to the beginning of each buffer.
+ * + * @param s the shared benchmark state providing array bounds + */ + @Setup(Level.Iteration) + public void setupIteration(final BenchmarkState s) { + for (int i = 0; i < INDEX_BUFFER_SIZE; i++) { + this.exactIndices[i] = this.random.nextInt(s.versions.length); + this.floorIndices[i] = this.random.nextInt(s.lookupVersions.length); + } + this.exactCursor = 0; + this.floorCursor = 0; + } + + /** + * Returns the next random index for exact version lookup. + * + *Uses bit masking to wrap around the buffer efficiently.
+ * + * @return a random index into {@link BenchmarkState#versions} + */ + public int nextExactIndex() { + return this.exactIndices[this.exactCursor++ & INDEX_MASK]; + } + + /** + * Returns the next random index for floor version lookup. + * + *Uses bit masking to wrap around the buffer efficiently.
+ * + * @return a random index into {@link BenchmarkState#lookupVersions} + */ + public int nextFloorIndex() { + return this.floorIndices[this.floorCursor++ & INDEX_MASK]; + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java new file mode 100644 index 0000000..c74d288 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.core; + +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark for single DataFix application performance. + * + *Measures the overhead of applying a single fix to data of varying sizes. + * Includes a baseline identity fix measurement to isolate framework overhead from actual transformation costs.
+ * + *| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 (for statistical significance) |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SingleFixBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SingleFixBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific payload size only
+ * java -jar benchmarks.jar SingleFixBenchmark -p payloadSize=SMALL
+ * }
+ *
+ * @author Erik Pförtner
+ * @see BenchmarkBootstrap
+ * @see BenchmarkDataGenerator
+ * @see PayloadSize
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class SingleFixBenchmark {
+
+ /**
+ * Benchmarks a single field rename operation.
+ *
+ * Measures the performance of renaming one field in the input data. + * This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize} + * to measure scaling behavior.
+ * + * @param s the shared benchmark state containing fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void singleRenameFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.fixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the identity (no-op) fix as a baseline measurement. + * + *Measures pure framework overhead without any actual data transformation. + * Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other + * benchmark results.
+ * + * @param s the shared benchmark state containing identity fixer and input data + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void identityFix(final SizedState s, + final Blackhole blackhole) { + blackhole.consume(s.identityFixer.update( + BenchmarkBootstrap.BENCHMARK_TYPE, + s.input, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks a complex player data transformation with codec roundtrip. + * + *Measures the performance of a realistic migration scenario where data + * is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex + * object transformations.
+ * + *This benchmark is expected to be significantly slower than {@link #singleRenameFix} + * because codec roundtrips involve reflection, object instantiation, and full serialization/deserialization + * cycles.
+ * + * @param s the shared player benchmark state + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFix(final PlayerState s, + final Blackhole blackhole) { + blackhole.consume(s.playerFixer.update( + BenchmarkBootstrap.PLAYER_TYPE, + s.playerInput, + s.fromVersion, + s.toVersion)); + } + + /** + * Benchmarks the complete end-to-end pipeline including setup overhead. + * + *Measures the total cost of a migration including:
+ *This benchmark is useful for understanding cold-start performance + * and the cost of creating new DataFixer instances. In production code, + * DataFixers should be reused rather than recreated per-operation.
+ * + *Note: Results will be significantly slower than {@link #playerDataFix} + * due to setup overhead included in each iteration.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void playerDataFixEndToEnd(final Blackhole blackhole) { + final DynamicThis state is shared across all threads within a benchmark trial + * ({@link Scope#Benchmark}). The {@link #payloadSize} parameter controls the complexity of test data:
+ * + *Creates fixers and generates test data based on the current + * {@link #payloadSize} parameter value.
+ */ + @Setup(Level.Trial) + public void setup() { + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.identityFixer = BenchmarkBootstrap.createIdentityFixer(); + this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } + + /** + * Shared JMH state for player-specific benchmarks. + * + *This state is separate from {@link SizedState} because the player benchmark + * uses a fixed, realistic data structure rather than parameterized payload sizes. The player data simulates a + * typical game entity with nested objects, arrays, and various field types.
+ * + *The player fix performs a complete codec roundtrip transformation, + * making it representative of real-world migration scenarios where data is decoded, transformed, and + * re-encoded.
+ * + * @see BenchmarkBootstrap#createPlayerFixer() + * @see BenchmarkDataGenerator#generatePlayerData + */ + @State(Scope.Benchmark) + public static class PlayerState { + + /** + * DataFixer configured with a player-specific transformation fix. Performs codec decode → transform → encode + * cycle. + */ + public DataFixer playerFixer; + + /** + * Pre-generated player data structure with realistic game entity fields. + */ + public DynamicCreates the player fixer and generates realistic player test data.
+ */ + @Setup(Level.Trial) + public void setup() { + this.playerFixer = BenchmarkBootstrap.createPlayerFixer(); + this.playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java new file mode 100644 index 0000000..32b058f --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * Core JMH benchmarks for the Aether DataFixers framework. 
+ * + *This package contains benchmarks that measure the fundamental performance characteristics + * of the data fixer system, including fix application, chain execution, and schema registry + * operations. These benchmarks form the foundation for performance regression testing and + * optimization efforts.
+ * + *| Class | + *Focus Area | + *Key Metrics | + *
|---|---|---|
| {@link de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark} | + *Single fix application | + *Per-fix overhead, payload size scaling | + *
| {@link de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark} | + *Chained fix execution | + *Chain length scaling, partial migration cost | + *
| {@link de.splatgames.aether.datafixers.benchmarks.core.SchemaLookupBenchmark} | + *Schema registry operations | + *Lookup latency, registry size scaling | + *
{@code
+ * # Run all core benchmarks
+ * java -jar benchmarks.jar ".*core.*"
+ *
+ * # Run with specific JVM options
+ * java -jar benchmarks.jar ".*core.*" -jvmArgs "-XX:+UseG1GC"
+ *
+ * # Generate JSON report
+ * java -jar benchmarks.jar ".*core.*" -rf json -rff core_results.json
+ * }
+ *
+ * All benchmarks in this package report both throughput (ops/time) and average time (time/op). + * When comparing results:
+ *This benchmark measures the overhead of converting data between different + * serialization formats using the {@code DynamicOps.convertTo()} mechanism. Cross-format + * conversion is essential when integrating systems that use different data formats + * or when migrating data through format-agnostic DataFixers.
+ * + *| From \ To | + *Gson | + *Jackson JSON | + *SnakeYAML | + *Jackson YAML | + *
|---|---|---|---|---|
| Gson | + *- | + *✓ | + *✓ | + *- | + *
| Jackson JSON | + *✓ | + *- | + *- | + *✓ | + *
| SnakeYAML | + *✓ | + *- | + *- | + *✓ | + *
| Jackson YAML | + *- | + *✓ | + *✓ | + *- | + *
| Parameter | Values | Description |
|---|---|---|
| payloadSize | SMALL, MEDIUM | Test data complexity |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run all cross-format benchmarks
+ * java -jar benchmarks.jar CrossFormatBenchmark
+ *
+ * # Run JSON library conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*(gson|jackson)To(Gson|Jackson).*"
+ *
+ * # Run YAML conversions only
+ * java -jar benchmarks.jar "CrossFormatBenchmark.*Yaml.*"
+ *
+ * # Compare with specific payload size
+ * java -jar benchmarks.jar CrossFormatBenchmark -p payloadSize=MEDIUM
+ * }
+ *
+ * @author Erik Pförtner
+ * @see JsonBenchmark
+ * @see YamlBenchmark
+ * @see TomlXmlBenchmark
+ * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps#convertTo(DynamicOps, Object)
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class CrossFormatBenchmark {
+
+ /**
+ * Payload size parameter controlling test data complexity.
+ *
+ * Limited to SMALL and MEDIUM as cross-format conversion overhead + * can be significant with large data sets.
+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson JSON DynamicOps implementation. + */ + private JacksonJsonOps jacksonJsonOps; + + /** + * SnakeYAML DynamicOps implementation using native Java types. + */ + private SnakeYamlOps snakeYamlOps; + + /** + * Jackson YAML DynamicOps implementation. + */ + private JacksonYamlOps jacksonYamlOps; + + /** + * Pre-generated Gson root element for conversion benchmarks. + */ + private JsonElement gsonRoot; + + /** + * Pre-generated Jackson JSON root node for conversion benchmarks. + */ + private JsonNode jacksonJsonRoot; + + /** + * Pre-generated SnakeYAML root object for conversion benchmarks. + */ + private Object snakeYamlRoot; + + /** + * Pre-generated Jackson YAML root node for conversion benchmarks. + */ + private JsonNode jacksonYamlRoot; + + /** + * Initializes all DynamicOps instances and pre-generates test data in each format. + * + *Data is pre-generated in each format to ensure conversion benchmarks measure + * only the conversion overhead, not data generation time.
+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonJsonOps = JacksonJsonOps.INSTANCE; + this.snakeYamlOps = SnakeYamlOps.INSTANCE; + this.jacksonYamlOps = JacksonYamlOps.INSTANCE; + + this.gsonRoot = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize).value(); + this.jacksonJsonRoot = BenchmarkDataGenerator.generate(this.jacksonJsonOps, this.payloadSize).value(); + this.snakeYamlRoot = BenchmarkDataGenerator.generate(this.snakeYamlOps, this.payloadSize).value(); + this.jacksonYamlRoot = BenchmarkDataGenerator.generate(this.jacksonYamlOps, this.payloadSize).value(); + } + + // ==================== Gson <-> Jackson JSON Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to Jackson JsonNode. + * + *Measures the overhead of converting between two JSON libraries. + * Both represent JSON but use different internal tree structures.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToJackson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson JsonNode to Gson JsonElement. + * + *Measures the reverse conversion from Jackson to Gson representation.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + // ==================== Gson <-> SnakeYAML Conversions ==================== + + /** + * Benchmarks conversion from Gson JsonElement to SnakeYAML native types. + * + *Measures cross-ecosystem conversion from JSON library to YAML library. + * SnakeYAML uses native Java Maps and Lists internally.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.gsonOps, this.gsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from SnakeYAML native types to Gson JsonElement. + * + *Measures cross-ecosystem conversion from YAML native types to JSON tree.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToGson(final Blackhole blackhole) { + final JsonElement result = this.gsonOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + // ==================== Jackson JSON <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from Jackson JSON to Jackson YAML. + * + *Measures conversion within the Jackson ecosystem. Both formats use + * JsonNode internally, potentially enabling optimizations.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonJsonToYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML to Jackson JSON. + * + *Measures reverse conversion within the Jackson ecosystem.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToJson(final Blackhole blackhole) { + final JsonNode result = this.jacksonJsonOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } + + // ==================== SnakeYAML <-> Jackson YAML Conversions ==================== + + /** + * Benchmarks conversion from SnakeYAML native types to Jackson YAML JsonNode. + * + *Measures conversion between two YAML libraries with different internal + * representations (native Java types vs JsonNode).
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void snakeYamlToJacksonYaml(final Blackhole blackhole) { + final JsonNode result = this.jacksonYamlOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot); + blackhole.consume(result); + } + + /** + * Benchmarks conversion from Jackson YAML JsonNode to SnakeYAML native types. + * + *Measures reverse conversion from JsonNode to native Java Maps/Lists.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonYamlToSnakeYaml(final Blackhole blackhole) { + final Object result = this.snakeYamlOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot); + blackhole.consume(result); + } +} diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java new file mode 100644 index 0000000..1a87c58 --- /dev/null +++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java @@ -0,0 +1,410 @@ +/* + * Copyright (c) 2026 Splatgames.de Software and Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package de.splatgames.aether.datafixers.benchmarks.format; + +import com.fasterxml.jackson.databind.JsonNode; +import com.google.gson.JsonElement; +import de.splatgames.aether.datafixers.api.DataVersion; +import de.splatgames.aether.datafixers.api.dynamic.Dynamic; +import de.splatgames.aether.datafixers.api.fix.DataFixer; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap; +import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator; +import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize; +import de.splatgames.aether.datafixers.codec.json.gson.GsonOps; +import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.jetbrains.annotations.Nullable; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +/** + * JMH benchmark comparing JSON DynamicOps implementations: Gson vs Jackson. + * + *This benchmark measures the performance of JSON-based operations using two + * different underlying libraries: Google Gson ({@link GsonOps}) and Jackson Databind + * ({@link JacksonJsonOps}). The results help determine which implementation is more + * suitable for specific use cases.
+ * + *Measure Dynamic object construction performance:
+ *Measure field read operations on existing data:
+ *Measure field write/set operations:
+ *Measure DataFixer migration performance:
+ *| Implementation | Library | Node Type | Characteristics |
|---|---|---|---|
| {@link GsonOps} | + *Google Gson | + *{@code JsonElement} | + *Simple API, smaller footprint, widely used | + *
| {@link JacksonJsonOps} | + *Jackson Databind | + *{@code JsonNode} | + *Feature-rich, streaming support, high performance | + *
| Parameter | Values | Description |
|---|---|---|
| payloadSize | SMALL, MEDIUM, LARGE | Test data complexity |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run all JSON benchmarks
+ * java -jar benchmarks.jar JsonBenchmark
+ *
+ * # Compare only field access performance
+ * java -jar benchmarks.jar "JsonBenchmark.*FieldRead"
+ *
+ * # Run Gson-only benchmarks
+ * java -jar benchmarks.jar "JsonBenchmark.gson.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar JsonBenchmark -p payloadSize=LARGE
+ * }
+ *
+ * @author Erik Pförtner
+ * @see YamlBenchmark
+ * @see TomlXmlBenchmark
+ * @see CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @see de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class JsonBenchmark {
+
+ /**
+ * Field name used for read/write benchmarks.
+ *
+ * References the first string field generated by {@link BenchmarkDataGenerator}.
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *Injected by JMH to run benchmarks across different data sizes.
+ */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + private PayloadSize payloadSize; + + /** + * Google Gson DynamicOps implementation. + */ + private GsonOps gsonOps; + + /** + * Jackson Databind DynamicOps implementation. + */ + private JacksonJsonOps jacksonOps; + + /** + * Pre-generated test data using Gson. + */ + private DynamicMay be {@code null} if no dedicated Jackson fixer is configured. + * In that case, cross-format migration behavior is measured instead.
+ */ + @Nullable + private DataFixer jacksonFixer; + + /** + * Source version for migrations (v1). + */ + private DataVersion fromVersion; + + /** + * Target version for migrations (v2). + */ + private DataVersion toVersion; + + /** + * Initializes DynamicOps instances, test data, and DataFixers. + * + *Both Gson and Jackson data are pre-generated to isolate benchmark + * measurements from data creation overhead (except for generation benchmarks).
+ */ + @Setup(Level.Trial) + public void setup() { + this.gsonOps = GsonOps.INSTANCE; + this.jacksonOps = JacksonJsonOps.INSTANCE; + + this.gsonData = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize); + this.jacksonData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize); + + this.gsonFixer = BenchmarkBootstrap.createSingleFixFixer(); + + // If you have a dedicated Jackson fixer, wire it here. Otherwise keep it null and measure cross-format explicitly. + // Example (if you add it later): this.jacksonFixer = BenchmarkBootstrap.createSingleFixFixerJackson(); + this.jacksonFixer = null; + + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using GsonOps. + * + *Measures the time to create a complete test data structure using + * Gson as the underlying JSON representation.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonGenerate(final Blackhole blackhole) { + final DynamicMeasures the time to create a complete test data structure using + * Jackson as the underlying JSON representation.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonGenerate(final Blackhole blackhole) { + final DynamicMeasures the time to retrieve a single field from a pre-existing + * Gson-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldRead(final Blackhole blackhole) { + final DynamicMeasures the time to retrieve a single field from a pre-existing + * Jackson-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldRead(final Blackhole blackhole) { + final DynamicMeasures the time to add a new field to a Gson-based Dynamic object. + * This operation typically creates a new Dynamic with the modified content.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonFieldSet(final Blackhole blackhole) { + final DynamicMeasures the time to add a new field to a Jackson-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonFieldSet(final Blackhole blackhole) { + final DynamicMeasures the time to apply a single fix migration to Gson-based + * Dynamic data. This represents the typical migration scenario where + * both fixer and data use the same DynamicOps implementation.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void gsonMigration(final Blackhole blackhole) { + final DynamicIf a dedicated Jackson fixer is available, measures native Jackson + * migration. Otherwise, falls back to cross-format migration using the + * Gson-based fixer with Jackson input data.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void jacksonMigration(final Blackhole blackhole) { + if (this.jacksonFixer == null) { + // No dedicated Jackson fixer available -> this would not be a fair "Jackson migration" benchmark. + // Measure the cross-format behavior explicitly instead. + final DynamicMeasures the performance overhead when the fixer's DynamicOps differs + * from the input data's DynamicOps. This scenario is common when migrating + * data from various sources through a centralized fixer.
+ * + *Comparing this benchmark with {@link #gsonMigration} reveals the + * overhead of format conversion during migration.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void crossFormatMigrationJacksonInput(final Blackhole blackhole) { + final DynamicThis benchmark measures the performance of TOML and XML format operations + * using Jackson-based implementations ({@link JacksonTomlOps} and {@link JacksonXmlOps}). + * Both formats share Jackson's unified API, enabling direct performance comparison.
+ * + *Measure Dynamic object construction performance:
+ *Measure field read operations on existing data:
+ *Measure field write/set operations:
+ *Measure DataFixer migration performance:
+ *| Implementation | Library | Node Type | Use Case |
|---|---|---|---|
| {@link JacksonTomlOps} | + *Jackson Dataformat TOML | + *{@code JsonNode} | + *Configuration files, Rust ecosystem integration | + *
| {@link JacksonXmlOps} | + *Jackson Dataformat XML | + *{@code JsonNode} | + *Legacy systems, SOAP/REST APIs, document formats | + *
| Parameter | Values | Description |
|---|---|---|
| payloadSize | SMALL, MEDIUM | Test data complexity (LARGE excluded for performance) |
Note: The LARGE payload size is excluded from this benchmark because + * TOML and XML serialization typically have higher overhead than JSON/YAML, + * making large payloads impractical for typical use cases.
+ * + *| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run all TOML/XML benchmarks
+ * java -jar benchmarks.jar TomlXmlBenchmark
+ *
+ * # Run TOML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.toml.*"
+ *
+ * # Run XML-only benchmarks
+ * java -jar benchmarks.jar "TomlXmlBenchmark.xml.*"
+ *
+ * # Compare generation performance
+ * java -jar benchmarks.jar "TomlXmlBenchmark.*Generate"
+ * }
+ *
+ * @author Erik Pförtner
+ * @see JsonBenchmark
+ * @see YamlBenchmark
+ * @see CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.codec.toml.jackson.JacksonTomlOps
+ * @see de.splatgames.aether.datafixers.codec.xml.jackson.JacksonXmlOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class TomlXmlBenchmark {
+
+ /**
+ * Field name used for read/write benchmarks.
+ *
+ * References the first string field generated by {@link BenchmarkDataGenerator}.
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *Limited to SMALL and MEDIUM to avoid excessive benchmark runtime + * with the higher overhead of TOML and XML processing.
+ */ + @Param({"SMALL", "MEDIUM"}) + private PayloadSize payloadSize; + + /** + * Jackson TOML DynamicOps implementation. + */ + private JacksonTomlOps tomlOps; + + /** + * Jackson XML DynamicOps implementation. + */ + private JacksonXmlOps xmlOps; + + /** + * Pre-generated test data using TOML format. + */ + private DynamicBoth TOML and XML data are pre-generated to isolate benchmark + * measurements from data creation overhead.
+ */ + @Setup(Level.Trial) + public void setup() { + this.tomlOps = JacksonTomlOps.INSTANCE; + this.xmlOps = JacksonXmlOps.INSTANCE; + + this.tomlData = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize); + this.xmlData = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize); + + this.fixer = BenchmarkBootstrap.createSingleFixFixer(); + this.fromVersion = new DataVersion(1); + this.toVersion = new DataVersion(2); + } + + // ==================== Data Generation Benchmarks ==================== + + /** + * Benchmarks Dynamic object generation using JacksonTomlOps. + * + *Measures the time to create a complete test data structure using + * Jackson's TOML dataformat module.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlGenerate(final Blackhole blackhole) { + final DynamicMeasures the time to create a complete test data structure using + * Jackson's XML dataformat module.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlGenerate(final Blackhole blackhole) { + final DynamicMeasures the time to retrieve a single field from a pre-existing + * TOML-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldRead(final Blackhole blackhole) { + final DynamicMeasures the time to retrieve a single field from a pre-existing + * XML-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldRead(final Blackhole blackhole) { + final DynamicMeasures the time to add a new field to a TOML-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlFieldSet(final Blackhole blackhole) { + final DynamicMeasures the time to add a new field to an XML-based Dynamic object.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlFieldSet(final Blackhole blackhole) { + final DynamicMeasures the time to apply a single fix migration to TOML-based + * Dynamic data.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void tomlMigration(final Blackhole blackhole) { + final DynamicMeasures the time to apply a single fix migration to XML-based + * Dynamic data.
+ * + * @param blackhole JMH blackhole to prevent dead code elimination + */ + @Benchmark + public void xmlMigration(final Blackhole blackhole) { + final DynamicThis benchmark measures the performance of YAML-based operations using two + * different underlying libraries: SnakeYAML ({@link SnakeYamlOps}) and Jackson YAML + * ({@link JacksonYamlOps}). YAML is commonly used for configuration files and + * human-readable data serialization.
+ * + *Measure Dynamic object construction performance:
+ *Measure field read operations on existing data:
+ *Measure field write/set operations:
+ *Measure DataFixer migration performance:
+ *| Implementation | Library | Node Type | Characteristics |
|---|---|---|---|
| {@link SnakeYamlOps} | + *SnakeYAML | + *{@code Object} (native Java types) | + *Native YAML library, uses Maps/Lists, anchors & aliases support | + *
| {@link JacksonYamlOps} | + *Jackson Dataformat YAML | + *{@code JsonNode} | + *Unified Jackson API, shares code with JSON, streaming support | + *
| Parameter | Values | Description |
|---|---|---|
| payloadSize | SMALL, MEDIUM, LARGE | Test data complexity |
| Setting | Value |
|---|---|
| Warmup | 5 iterations, 1 second each |
| Measurement | 10 iterations, 1 second each |
| Forks | 2 |
| JVM Heap | 2 GB min/max |
| Time Unit | Microseconds |
{@code
+ * # Run all YAML benchmarks
+ * java -jar benchmarks.jar YamlBenchmark
+ *
+ * # Compare only generation performance
+ * java -jar benchmarks.jar "YamlBenchmark.*Generate"
+ *
+ * # Run SnakeYAML-only benchmarks
+ * java -jar benchmarks.jar "YamlBenchmark.snakeYaml.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar YamlBenchmark -p payloadSize=MEDIUM
+ * }
+ *
+ * @author Erik Pförtner
+ * @see JsonBenchmark
+ * @see TomlXmlBenchmark
+ * @see CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps
+ * @see de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class YamlBenchmark {
+
+ /**
+ * Field name used for read/write benchmarks.
+ *
+ * References the first string field generated by {@link BenchmarkDataGenerator}.
+ */ + private static final String FIELD_NAME = "stringField0"; + + /** + * Payload size parameter controlling test data complexity. + * + *Injected by JMH to run benchmarks across different data sizes.
+ */ + @Param({"SMALL", "MEDIUM", "LARGE"}) + private PayloadSize payloadSize; + + /** + * SnakeYAML DynamicOps implementation using native Java types. + */ + private SnakeYamlOps snakeOps; + + /** + * Jackson YAML DynamicOps implementation using JsonNode. + */ + private JacksonYamlOps jacksonOps; + + /** + * Pre-generated test data using SnakeYAML. + */ + private Dynamic