+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+public final class BenchmarkRunner {
+
+ private BenchmarkRunner() {
+ // Utility class; prevent instantiation
+ }
+
+ /**
+ * Main entry point for running benchmarks.
+ *
+ *
+ * <p>When run without arguments, executes all benchmarks in the package.
+ * Supports all standard JMH command-line arguments.
+ *
+ * @param args command-line arguments (passed to JMH)
+ * @throws RunnerException if benchmark execution fails
+ * @throws IOException if there is an I/O error
+ */
+ public static void main(final String[] args) throws RunnerException, IOException {
+ if (args.length > 0) {
+ // If arguments are provided, delegate to JMH main
+ org.openjdk.jmh.Main.main(args);
+ } else {
+ // Run with default options
+ runAllBenchmarks();
+ }
+ }
+
+ /**
+ * Runs all benchmarks with default configuration.
+ *
+ * @throws RunnerException if benchmark execution fails
+ */
+ public static void runAllBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs a quick subset of benchmarks for validation.
+ *
+ *
+ * <p>Useful for CI/CD pipelines or quick sanity checks.
+ *
+ * @throws RunnerException if benchmark execution fails
+ */
+ public static void runQuickBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\.SingleFixBenchmark")
+ .warmupIterations(2)
+ .measurementIterations(3)
+ .forks(1)
+ .jvmArgs("-Xms1G", "-Xmx1G")
+ .param("payloadSize", "SMALL")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs core migration benchmarks only.
+ *
+ * @throws RunnerException if benchmark execution fails
+ */
+ public static void runCoreBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+
+ /**
+ * Runs format comparison benchmarks only.
+ *
+ * @throws RunnerException if benchmark execution fails
+ */
+ public static void runFormatBenchmarks() throws RunnerException {
+ final Options options = new OptionsBuilder()
+ .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.format\\..*")
+ .warmupIterations(5)
+ .measurementIterations(10)
+ .forks(2)
+ .jvmArgs("-Xms2G", "-Xmx2G")
+ .build();
+
+ new Runner(options).run();
+ }
+}
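Note: because main(String[]) hands a non-empty argument list straight to org.openjdk.jmh.Main, the usual JMH flags can also be driven from code. A minimal sketch; the flag values and include pattern below are illustrative, not project defaults:

// Illustrative only: routes standard JMH options through BenchmarkRunner.main.
public final class QuickStart {
    public static void main(final String[] args) throws Exception {
        BenchmarkRunner.main(new String[]{
                "-wi", "2",                      // warmup iterations
                "-i", "3",                       // measurement iterations
                "-f", "1",                       // forks
                ".*SingleFixBenchmark.*"         // include pattern (regex)
        });
    }
}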
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
new file mode 100644
index 0000000..2167a2d
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.codec;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.codec.Codec;
+import de.splatgames.aether.datafixers.api.codec.Codecs;
+import de.splatgames.aether.datafixers.api.result.DataResult;
+import de.splatgames.aether.datafixers.api.util.Pair;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for collection codec encode/decode performance.
+ *
+ *
+ * <p>Measures the performance of encoding and decoding lists of various sizes
+ * using the {@link Codecs#list(Codec)} API.
+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class CollectionCodecBenchmark {
+
+ @Param({"10", "100", "1000"})
+ private int listSize;
+
+ private Codec<List<String>> stringListCodec;
+ private Codec<List<Integer>> intListCodec;
+
+ private List<String> stringList;
+ private List<Integer> intList;
+
+ private JsonElement encodedStringList;
+ private JsonElement encodedIntList;
+
+ @Setup(Level.Trial)
+ public void setup() {
+ this.stringListCodec = Codecs.list(Codecs.STRING);
+ this.intListCodec = Codecs.list(Codecs.INT);
+
+ // Generate test data
+ this.stringList = new ArrayList<>(this.listSize);
+ this.intList = new ArrayList<>(this.listSize);
+
+ for (int i = 0; i < this.listSize; i++) {
+ this.stringList.add("item-" + i);
+ this.intList.add(i);
+ }
+
+ // Pre-encode for decode benchmarks
+ this.encodedStringList = this.stringListCodec.encodeStart(GsonOps.INSTANCE, this.stringList)
+ .result().orElseThrow();
+ this.encodedIntList = this.intListCodec.encodeStart(GsonOps.INSTANCE, this.intList)
+ .result().orElseThrow();
+ }
+
+ // ==================== String List ====================
+
+ /**
+ * Benchmarks encoding a list of strings.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeStringList(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = this.stringListCodec.encodeStart(
+ GsonOps.INSTANCE, this.stringList);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks decoding a list of strings.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeStringList(final Blackhole blackhole) {
+ final DataResult<Pair<List<String>, JsonElement>> result = this.stringListCodec.decode(
+ GsonOps.INSTANCE, this.encodedStringList);
+ blackhole.consume(result);
+ }
+
+ // ==================== Integer List ====================
+
+ /**
+ * Benchmarks encoding a list of integers.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void encodeIntList(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = this.intListCodec.encodeStart(
+ GsonOps.INSTANCE, this.intList);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks decoding a list of integers.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void decodeIntList(final Blackhole blackhole) {
+ final DataResult<Pair<List<Integer>, JsonElement>> result = this.intListCodec.decode(
+ GsonOps.INSTANCE, this.encodedIntList);
+ blackhole.consume(result);
+ }
+
+ // ==================== Round Trip ====================
+
+ /**
+ * Benchmarks round-trip encoding and decoding of a string list.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripStringList(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = this.stringListCodec.encodeStart(
+ GsonOps.INSTANCE, this.stringList);
+ final DataResult<Pair<List<String>, JsonElement>> decoded = encoded.flatMap(
+ json -> this.stringListCodec.decode(GsonOps.INSTANCE, json));
+ blackhole.consume(decoded);
+ }
+
+ /**
+ * Benchmarks round-trip encoding and decoding of an integer list.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripIntList(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = this.intListCodec.encodeStart(
+ GsonOps.INSTANCE, this.intList);
+ final DataResult<Pair<List<Integer>, JsonElement>> decoded = encoded.flatMap(
+ json -> this.intListCodec.decode(GsonOps.INSTANCE, json));
+ blackhole.consume(decoded);
+ }
+}
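For orientation, the list-codec calls exercised above compose the same way outside the harness. A minimal round-trip sketch using only the API surface visible in this benchmark (GsonOps as the format, error handling collapsed to orElseThrow):

// Sketch of the encode/decode round trip measured by roundTripStringList.
final Codec<List<String>> codec = Codecs.list(Codecs.STRING);
final List<String> items = List.of("item-0", "item-1", "item-2");

// Encode to a Gson tree; DataResult holds either the value or an error.
final JsonElement encoded = codec.encodeStart(GsonOps.INSTANCE, items)
        .result()
        .orElseThrow();

// Decode back; the result pairs the decoded list with the accompanying JSON element.
final Pair<List<String>, JsonElement> decoded = codec.decode(GsonOps.INSTANCE, encoded)
        .result()
        .orElseThrow();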
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
new file mode 100644
index 0000000..82b3d04
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.codec;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.codec.Codec;
+import de.splatgames.aether.datafixers.api.codec.Codecs;
+import de.splatgames.aether.datafixers.api.result.DataResult;
+import de.splatgames.aether.datafixers.api.util.Pair;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for primitive codec encode/decode performance.
+ *
+ *
+ * <p>Measures the performance of encoding and decoding primitive types
+ * using the {@link Codecs} API.
+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class PrimitiveCodecBenchmark {
+
+ // Test values
+ private static final boolean TEST_BOOL = true;
+ private static final int TEST_INT = 42;
+ private static final long TEST_LONG = 123456789L;
+ private static final float TEST_FLOAT = 3.14159f;
+ private static final double TEST_DOUBLE = 2.718281828;
+ private static final String TEST_STRING = "benchmark-test-string";
+
+ // Pre-encoded values for decode benchmarks
+ private JsonElement encodedBool;
+ private JsonElement encodedInt;
+ private JsonElement encodedLong;
+ private JsonElement encodedFloat;
+ private JsonElement encodedDouble;
+ private JsonElement encodedString;
+
+ @Setup(Level.Trial)
+ public void setup() {
+ this.encodedBool = Codecs.BOOL.encodeStart(GsonOps.INSTANCE, TEST_BOOL).result().orElseThrow();
+ this.encodedInt = Codecs.INT.encodeStart(GsonOps.INSTANCE, TEST_INT).result().orElseThrow();
+ this.encodedLong = Codecs.LONG.encodeStart(GsonOps.INSTANCE, TEST_LONG).result().orElseThrow();
+ this.encodedFloat = Codecs.FLOAT.encodeStart(GsonOps.INSTANCE, TEST_FLOAT).result().orElseThrow();
+ this.encodedDouble = Codecs.DOUBLE.encodeStart(GsonOps.INSTANCE, TEST_DOUBLE).result().orElseThrow();
+ this.encodedString = Codecs.STRING.encodeStart(GsonOps.INSTANCE, TEST_STRING).result().orElseThrow();
+ }
+
+ // ==================== Boolean ====================
+
+ @Benchmark
+ public void encodeBool(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.BOOL.encodeStart(GsonOps.INSTANCE, TEST_BOOL);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeBool(final Blackhole blackhole) {
+ final DataResult<Pair<Boolean, JsonElement>> result = Codecs.BOOL.decode(GsonOps.INSTANCE, this.encodedBool);
+ blackhole.consume(result);
+ }
+
+ // ==================== Integer ====================
+
+ @Benchmark
+ public void encodeInt(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.INT.encodeStart(GsonOps.INSTANCE, TEST_INT);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeInt(final Blackhole blackhole) {
+ final DataResult<Pair<Integer, JsonElement>> result = Codecs.INT.decode(GsonOps.INSTANCE, this.encodedInt);
+ blackhole.consume(result);
+ }
+
+ // ==================== Long ====================
+
+ @Benchmark
+ public void encodeLong(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.LONG.encodeStart(GsonOps.INSTANCE, TEST_LONG);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeLong(final Blackhole blackhole) {
+ final DataResult<Pair<Long, JsonElement>> result = Codecs.LONG.decode(GsonOps.INSTANCE, this.encodedLong);
+ blackhole.consume(result);
+ }
+
+ // ==================== Float ====================
+
+ @Benchmark
+ public void encodeFloat(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.FLOAT.encodeStart(GsonOps.INSTANCE, TEST_FLOAT);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeFloat(final Blackhole blackhole) {
+ final DataResult<Pair<Float, JsonElement>> result = Codecs.FLOAT.decode(GsonOps.INSTANCE, this.encodedFloat);
+ blackhole.consume(result);
+ }
+
+ // ==================== Double ====================
+
+ @Benchmark
+ public void encodeDouble(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.DOUBLE.encodeStart(GsonOps.INSTANCE, TEST_DOUBLE);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeDouble(final Blackhole blackhole) {
+ final DataResult<Pair<Double, JsonElement>> result = Codecs.DOUBLE.decode(GsonOps.INSTANCE, this.encodedDouble);
+ blackhole.consume(result);
+ }
+
+ // ==================== String ====================
+
+ @Benchmark
+ public void encodeString(final Blackhole blackhole) {
+ final DataResult<JsonElement> result = Codecs.STRING.encodeStart(GsonOps.INSTANCE, TEST_STRING);
+ blackhole.consume(result);
+ }
+
+ @Benchmark
+ public void decodeString(final Blackhole blackhole) {
+ final DataResult<Pair<String, JsonElement>> result = Codecs.STRING.decode(GsonOps.INSTANCE, this.encodedString);
+ blackhole.consume(result);
+ }
+
+ // ==================== Round Trip ====================
+
+ @Benchmark
+ public void roundTripInt(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = Codecs.INT.encodeStart(GsonOps.INSTANCE, TEST_INT);
+ final DataResult<Pair<Integer, JsonElement>> decoded = encoded.flatMap(
+ json -> Codecs.INT.decode(GsonOps.INSTANCE, json));
+ blackhole.consume(decoded);
+ }
+
+ @Benchmark
+ public void roundTripString(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = Codecs.STRING.encodeStart(GsonOps.INSTANCE, TEST_STRING);
+ final DataResult<Pair<String, JsonElement>> decoded = encoded.flatMap(
+ json -> Codecs.STRING.decode(GsonOps.INSTANCE, json));
+ blackhole.consume(decoded);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
new file mode 100644
index 0000000..f402cf8
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.concurrent;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.api.schema.Schema;
+import de.splatgames.aether.datafixers.api.schema.SchemaRegistry;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry;
+import de.splatgames.aether.datafixers.testkit.factory.MockSchemas;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for concurrent migration and registry access performance.
+ *
+ *
+ * <p>Measures the thread-safety and contention characteristics of the
+ * DataFixer and SchemaRegistry under concurrent load.
+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class ConcurrentMigrationBenchmark {
+
+ @Param({"SMALL", "MEDIUM"})
+ private PayloadSize payloadSize;
+
+ // Shared state across threads
+ private DataFixer sharedFixer;
+ private DataFixer sharedChainFixer;
+ private SchemaRegistry sharedRegistry;
+ private DataVersion fromVersion;
+ private DataVersion toVersion;
+ private DataVersion chainToVersion;
+ private DataVersion[] registryVersions;
+
+ @Setup(Level.Trial)
+ public void setup() {
+ // Create shared fixer (thread-safe after freeze)
+ this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer();
+ this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10);
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(2);
+ this.chainToVersion = new DataVersion(11);
+
+ // Create shared registry
+ final SimpleSchemaRegistry registry = new SimpleSchemaRegistry();
+ this.registryVersions = new DataVersion[100];
+ for (int i = 0; i < 100; i++) {
+ final int version = (i + 1) * 10;
+ this.registryVersions[i] = new DataVersion(version);
+ registry.register(MockSchemas.minimal(version));
+ }
+ registry.freeze();
+ this.sharedRegistry = registry;
+ }
+
+ /**
+ * Per-thread state for independent test data.
+ */
+ @State(Scope.Thread)
+ public static class ThreadState {
+
+ private Dynamic threadInput;
+ private Random random;
+
+ @Setup(Level.Iteration)
+ public void setup(final ConcurrentMigrationBenchmark parent) {
+ // Each thread gets its own input data
+ this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, parent.payloadSize);
+ this.random = new Random();
+ }
+ }
+
+ // ==================== Concurrent Migration ====================
+
+ /**
+ * Benchmarks concurrent single-fix migrations using all available processors.
+ *
+ * @param state per-thread state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentSingleFix(final ThreadState state, final Blackhole blackhole) {
+ final Dynamic result = this.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ state.threadInput,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks concurrent chain migrations using all available processors.
+ *
+ * @param state per-thread state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentChainMigration(final ThreadState state, final Blackhole blackhole) {
+ final Dynamic result = this.sharedChainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ state.threadInput,
+ this.fromVersion,
+ this.chainToVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks concurrent migrations with 4 threads.
+ *
+ * @param state per-thread state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(4)
+ public void fourThreadMigration(final ThreadState state, final Blackhole blackhole) {
+ final Dynamic result = this.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ state.threadInput,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks concurrent migrations with 8 threads.
+ *
+ * @param state per-thread state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(8)
+ public void eightThreadMigration(final ThreadState state, final Blackhole blackhole) {
+ final Dynamic result = this.sharedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ state.threadInput,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ // ==================== Concurrent Registry Access ====================
+
+ /**
+ * Benchmarks concurrent schema registry lookups.
+ *
+ * @param state per-thread state
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentRegistryLookup(final ThreadState state, final Blackhole blackhole) {
+ final int index = state.random.nextInt(this.registryVersions.length);
+ final Schema schema = this.sharedRegistry.get(this.registryVersions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks concurrent latest schema access.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ @Threads(Threads.MAX)
+ public void concurrentLatestLookup(final Blackhole blackhole) {
+ final Schema schema = this.sharedRegistry.latest();
+ blackhole.consume(schema);
+ }
+}
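The shared/thread-local split above (Scope.Benchmark state hammered by every worker, Scope.Thread state holding each worker's input) is the standard JMH recipe for contention tests. A self-contained skeleton of that recipe, with a ConcurrentHashMap standing in for the frozen fixer and registry:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;

import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.infra.Blackhole;

// Generic contention skeleton; not part of this module, shown for illustration only.
@State(Scope.Benchmark)
public class ContentionSkeleton {

    private ConcurrentHashMap<Integer, String> shared;   // one instance, hit by all threads

    @Setup(Level.Trial)
    public void setup() {
        this.shared = new ConcurrentHashMap<>();
        for (int i = 0; i < 100; i++) {
            this.shared.put(i, "value-" + i);
        }
    }

    @State(Scope.Thread)
    public static class Local {
        int key;

        @Setup(Level.Iteration)
        public void setup() {
            this.key = ThreadLocalRandom.current().nextInt(100);
        }
    }

    @Benchmark
    @Threads(Threads.MAX)
    public void concurrentGet(final Local local, final Blackhole blackhole) {
        blackhole.consume(this.shared.get(local.key));
    }
}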
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
new file mode 100644
index 0000000..90818ff
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for multi-fix chain migration performance.
+ *
+ *
+ * <p>Measures the performance of applying multiple sequential fixes,
+ * simulating real-world migration scenarios where data may need to
+ * traverse many version upgrades.
+ * <p>All fixes in the chain perform the same operation type (rename),
+ * measuring sequential fix application overhead.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void renameChain(final Blackhole blackhole) {
+ final Dynamic result = this.chainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks applying a chain of mixed fix types.
+ *
+ *
+ * <p>Includes rename, add, remove, and transform operations
+ * for more realistic migration scenarios.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void mixedChain(final Blackhole blackhole) {
+ if (this.mixedFixer == null) {
+ // Skip for fixCount < 4
+ blackhole.consume(this.input);
+ return;
+ }
+ final Dynamic result = this.mixedFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks partial migration (half the chain).
+ *
+ *
+ * <p>Measures performance when migrating to an intermediate version
+ * rather than the latest version.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void partialChain(final Blackhole blackhole) {
+ final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1);
+ final Dynamic result = this.chainFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ new DataVersion(halfwayVersion));
+ blackhole.consume(result);
+ }
+}
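When comparing these results with SingleFixBenchmark, a rough per-fix cost can be backed out by subtracting the identity baseline and dividing by the chain length. Simple arithmetic sketch; the numbers are placeholders, not measured values:

// Illustrative arithmetic only; plug in real averages from the JMH output.
public final class ChainCostEstimate {
    public static void main(final String[] args) {
        final double chainAvgMicros = 12.0;     // e.g. renameChain, fixCount = 10 (placeholder)
        final double identityAvgMicros = 0.4;   // identityFix baseline (placeholder)
        final int fixCount = 10;

        final double perFixMicros = (chainAvgMicros - identityAvgMicros) / fixCount;
        System.out.printf("~%.2f us per fix%n", perFixMicros);
    }
}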
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
new file mode 100644
index 0000000..d895b69
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.schema.Schema;
+import de.splatgames.aether.datafixers.api.schema.SchemaRegistry;
+import de.splatgames.aether.datafixers.core.schema.SimpleSchemaRegistry;
+import de.splatgames.aether.datafixers.testkit.factory.MockSchemas;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for schema registry lookup performance.
+ *
+ *
+ * <p>Measures the performance of {@link SchemaRegistry#get(DataVersion)}
+ * with varying registry sizes. Uses floor semantics (finds greatest version
+ * less than or equal to requested).
+ *
+ * @author Erik Pförtner
+ * @since 1.0.0
+ */
+@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
+public class SchemaLookupBenchmark {
+
+ @Param({"10", "50", "100", "500"})
+ private int schemaCount;
+
+ private SchemaRegistry registry;
+ private DataVersion[] versions;
+ private DataVersion[] lookupVersions;
+ private Random random;
+
+ @Setup(Level.Trial)
+ public void setup() {
+ // Create registry with specified number of schemas
+ final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry();
+ this.versions = new DataVersion[this.schemaCount];
+
+ for (int i = 0; i < this.schemaCount; i++) {
+ final int version = (i + 1) * 10; // 10, 20, 30, ...
+ this.versions[i] = new DataVersion(version);
+ simpleRegistry.register(MockSchemas.minimal(version));
+ }
+ simpleRegistry.freeze();
+ this.registry = simpleRegistry;
+
+ // Create lookup versions (including versions between registered versions)
+ this.lookupVersions = new DataVersion[this.schemaCount * 2];
+ for (int i = 0; i < this.lookupVersions.length; i++) {
+ this.lookupVersions[i] = new DataVersion((i + 1) * 5); // 5, 10, 15, ...
+ }
+
+ this.random = new Random(42); // Fixed seed for reproducibility
+ }
+
+ /**
+ * Benchmarks looking up an exact registered version.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void exactLookup(final Blackhole blackhole) {
+ final int index = this.random.nextInt(this.schemaCount);
+ final Schema schema = this.registry.get(this.versions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks looking up a version using floor semantics.
+ *
+ *
+ * <p>Half of the lookups will be for exact versions, half will
+ * require floor resolution.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void floorLookup(final Blackhole blackhole) {
+ final int index = this.random.nextInt(this.lookupVersions.length);
+ final Schema schema = this.registry.get(this.lookupVersions[index]);
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks getting the latest schema.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void latestLookup(final Blackhole blackhole) {
+ final Schema schema = this.registry.latest();
+ blackhole.consume(schema);
+ }
+
+ /**
+ * Benchmarks sequential lookup of all versions.
+ *
+ *
+ * <p>Measures cache-friendly access patterns.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void sequentialLookup(final Blackhole blackhole) {
+ for (final DataVersion version : this.versions) {
+ final Schema schema = this.registry.get(version);
+ blackhole.consume(schema);
+ }
+ }
+}
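The floor semantics exercised by floorLookup (return the greatest registered version less than or equal to the request) are exactly what a NavigableMap offers. The sketch below illustrates the lookup rule only and is not SimpleSchemaRegistry's actual implementation:

import java.util.Map;
import java.util.TreeMap;

// Floor-lookup illustration with String standing in for Schema.
final class FloorLookupSketch {
    private final TreeMap<Integer, String> schemasByVersion = new TreeMap<>();

    void register(final int version, final String schema) {
        this.schemasByVersion.put(version, schema);
    }

    // Greatest registered version <= requested, or null if nothing is registered below it.
    String get(final int requestedVersion) {
        final Map.Entry<Integer, String> entry = this.schemasByVersion.floorEntry(requestedVersion);
        return entry == null ? null : entry.getValue();
    }

    public static void main(final String[] args) {
        final FloorLookupSketch registry = new FloorLookupSketch();
        registry.register(10, "schema-v10");
        registry.register(20, "schema-v20");
        System.out.println(registry.get(15));   // schema-v10 (floor of 15)
        System.out.println(registry.get(20));   // schema-v20 (exact hit)
    }
}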
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
new file mode 100644
index 0000000..9e61504
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.core;
+
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.DataVersion;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.fix.DataFixer;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for single DataFix application performance.
+ *
+ *
+ * <p>Measures the overhead of applying a single fix to data of varying sizes.
+ * Includes a baseline identity fix measurement to isolate framework overhead.
+ * <p>Measures framework overhead without actual data transformation.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void identityFix(final Blackhole blackhole) {
+ final Dynamic result = this.identityFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.input,
+ this.fromVersion,
+ this.toVersion);
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks applying a fix to player-like data structure.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void playerDataFix(final Blackhole blackhole) {
+ final Dynamic playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ final Dynamic result = playerFixer.update(
+ BenchmarkBootstrap.PLAYER_TYPE,
+ playerInput,
+ new DataVersion(1),
+ new DataVersion(2));
+ blackhole.consume(result);
+ }
+}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
new file mode 100644
index 0000000..b725afa
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package de.splatgames.aether.datafixers.benchmarks.format;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.google.gson.JsonElement;
+import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
+import de.splatgames.aether.datafixers.api.dynamic.DynamicOps;
+import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
+import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
+import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
+import de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps;
+import de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps;
+import de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * JMH benchmark for cross-format conversion performance.
+ *
+ *
+ * <p>Measures the overhead of converting data between different
+ * DynamicOps implementations using {@link DynamicOps#convertTo}.
+ * <p>Measures the performance of renaming one field in the input data.
+ * This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize}
+ * to measure scaling behavior.
+ *
+ * @param s the shared benchmark state containing fixer and input data
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void singleRenameFix(final SizedState s, final Blackhole blackhole) {
+ blackhole.consume(s.fixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ s.input,
+ s.fromVersion,
+ s.toVersion));
}
/**
- * Benchmarks applying a single rename field fix.
+ * Benchmarks the identity (no-op) fix as a baseline measurement.
+ *
+ *
+ * <p>Measures pure framework overhead without any actual data transformation.
+ * Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other
+ * benchmark results.
*
+ * @param s the shared benchmark state containing identity fixer and input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void singleRenameFix(final Blackhole blackhole) {
- final Dynamic result = this.fixer.update(
+ public void identityFix(final SizedState s, final Blackhole blackhole) {
+ blackhole.consume(s.identityFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
- this.input,
- this.fromVersion,
- this.toVersion);
- blackhole.consume(result);
+ s.input,
+ s.fromVersion,
+ s.toVersion));
}
/**
- * Baseline benchmark with identity fix (no transformation).
+ * Benchmarks a complex player data transformation with codec roundtrip.
*
- *
- * <p>Measures framework overhead without actual data transformation.
+ *
+ * <p>Measures the performance of a realistic migration scenario where data
+ * is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex
+ * object transformations.
*
+ *
+ * <p>Expected performance: ~17-18 μs/op (significantly slower due to codec overhead)
+ *
+ *
+ * <p>The ~70x slowdown compared to {@link #singleRenameFix} is expected and
+ * acceptable, as codec roundtrips involve reflection, object instantiation, and full serialization/deserialization
+ * cycles.
+ *
+ * @param s the shared player benchmark state
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void identityFix(final Blackhole blackhole) {
- final Dynamic result = this.identityFixer.update(
- BenchmarkBootstrap.BENCHMARK_TYPE,
- this.input,
- this.fromVersion,
- this.toVersion);
- blackhole.consume(result);
+ public void playerDataFix(final PlayerState s,
+ final Blackhole blackhole) {
+ blackhole.consume(s.playerFixer.update(
+ BenchmarkBootstrap.PLAYER_TYPE,
+ s.playerInput,
+ s.fromVersion,
+ s.toVersion));
}
/**
- * Benchmarks applying a fix to player-like data structure.
+ * Benchmarks the complete end-to-end pipeline including setup overhead.
+ *
+ *
+ * <p>Measures the total cost of a migration including:
+ *
+ *
+ * <ul>
+ * <li>Test data generation</li>
+ * <li>DataFixer bootstrap and initialization</li>
+ * <li>Actual migration execution</li>
+ * </ul>
+ *
+ *
+ *
+ * <p>This benchmark is useful for understanding cold-start performance
+ * and the cost of creating new DataFixer instances. In production code,
+ * DataFixers should be reused rather than recreated per-operation.
+ *
+ *
+ * <p>Note: Results will be significantly slower than {@link #playerDataFix}
+ * due to setup overhead included in each iteration.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void playerDataFix(final Blackhole blackhole) {
+ public void playerDataFixEndToEnd(final Blackhole blackhole) {
final Dynamic playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();
- final Dynamic result = playerFixer.update(
+ blackhole.consume(playerFixer.update(
BenchmarkBootstrap.PLAYER_TYPE,
playerInput,
new DataVersion(1),
- new DataVersion(2));
- blackhole.consume(result);
+ new DataVersion(2)));
+ }
+
+ /**
+ * Shared JMH state for benchmarks parameterized by payload size.
+ *
+ *
+ * <p>This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The {@link #payloadSize} parameter controls the complexity of test data:
+ *
+ *
+ *
+ * <ul>
+ * <li>SMALL: 5 fields, 2 nesting levels, 10 array elements</li>
+ * <li>MEDIUM: 20 fields, 4 nesting levels, 100 array elements</li>
+ * <li>LARGE: 50 fields, 6 nesting levels, 1000 array elements</li>
+ * </ul>
+ *
+ *
+ * @see PayloadSize
+ */
+ @State(Scope.Benchmark)
+ public static class SizedState {
+
+ /**
+ * The payload size parameter, injected by JMH. Controls the complexity of generated test data.
+ */
+ @Param({"SMALL", "MEDIUM", "LARGE"})
+ public PayloadSize payloadSize;
+
+ /**
+ * DataFixer configured with a single field rename fix (v1 → v2).
+ */
+ public DataFixer fixer;
+
+ /**
+ * DataFixer configured with an identity (no-op) fix for baseline measurement.
+ */
+ public DataFixer identityFixer;
+
+ /**
+ * Pre-generated input data matching {@link #payloadSize}.
+ */
+ public Dynamic input;
+
+ /**
+ * Source version for migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Initializes the benchmark state once per trial.
+ *
+ *
+ * <p>Creates fixers and generates test data based on the current
+ * {@link #payloadSize} parameter value.
+ * <p>This state is separate from {@link SizedState} because the player benchmark
+ * uses a fixed, realistic data structure rather than parameterized payload sizes. The player data simulates a
+ * typical game entity with nested objects, arrays, and various field types.
+ *
+ *
+ * <p>The player fix performs a complete codec roundtrip transformation,
+ * making it representative of real-world migration scenarios where data is decoded, transformed, and
+ * re-encoded.
+ *
+ * @see BenchmarkBootstrap#createPlayerFixer()
+ * @see BenchmarkDataGenerator#generatePlayerData
+ */
+ @State(Scope.Benchmark)
+ public static class PlayerState {
+
+ /**
+ * DataFixer configured with a player-specific transformation fix. Performs codec decode → transform → encode
+ * cycle.
+ */
+ public DataFixer playerFixer;
+
+ /**
+ * Pre-generated player data structure with realistic game entity fields.
+ */
+ public Dynamic playerInput;
+
+ /**
+ * Source version for migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Initializes the player benchmark state once per trial.
+ *
+ *
+ * <p>Creates the player fixer and generates realistic player test data.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ this.playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ this.playerInput = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(2);
+ }
}
}
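The end-to-end variant above deliberately pays data-generation and bootstrap cost on every call; the Javadoc's advice to reuse fixers can be pictured as a small service that builds the fixer once. The service class is hypothetical, the bootstrap helpers and update call mirror the benchmark code, and the Dynamic<JsonElement> type parameter is assumed from the Gson-based setup:

// Hypothetical wrapper: construct the DataFixer once, reuse it for every migration.
public final class PlayerMigrationService {

    // The expensive part of playerDataFixEndToEnd, done a single time here.
    private final DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();

    public Dynamic<JsonElement> migrate(final Dynamic<JsonElement> playerData) {
        return this.playerFixer.update(
                BenchmarkBootstrap.PLAYER_TYPE,
                playerData,
                new DataVersion(1),
                new DataVersion(2));
    }
}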
From ccb0fb9c9516258e559782dbc52035f52481e31e Mon Sep 17 00:00:00 2001
From: Erik
Date: Tue, 20 Jan 2026 21:08:37 +0100
Subject: [PATCH 04/10] Remove expected performance notes from
`SingleFixBenchmark` Javadoc for cleaner documentation.
---
.../datafixers/benchmarks/core/SingleFixBenchmark.java | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
index 71dbef4..2ff7c49 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -110,8 +110,6 @@ public class SingleFixBenchmark {
* This represents a common, lightweight migration operation. The benchmark is parameterized by {@link PayloadSize}
* to measure scaling behavior.
*
- *
- *
* @param s the shared benchmark state containing fixer and input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -131,8 +129,6 @@ public void singleRenameFix(final SizedState s, final Blackhole blackhole) {
* Use this as a baseline to calculate the true cost of transformations by subtracting identity time from other
* benchmark results.
*
- *
- *
* @param s the shared benchmark state containing identity fixer and input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -152,10 +148,8 @@ public void identityFix(final SizedState s, final Blackhole blackhole) {
* is decoded via codec, transformed, and re-encoded. This represents the upper bound of migration cost for complex
* object transformations.
*
- *
- * <p>Expected performance: ~17-18 μs/op (significantly slower due to codec overhead)
- *
- *
- * <p>The ~70x slowdown compared to {@link #singleRenameFix} is expected and
- * acceptable, as codec roundtrips involve reflection, object instantiation, and full serialization/deserialization
+ *
+ * <p>This benchmark is expected to be significantly slower than {@link #singleRenameFix}
+ * because codec roundtrips involve reflection, object instantiation, and full serialization/deserialization
* cycles.
*
* @param s the shared player benchmark state
From 99e73e34111e0c841f4d89d46a44a96a5efb58d1 Mon Sep 17 00:00:00 2001
From: Erik
Date: Mon, 26 Jan 2026 22:10:18 +0100
Subject: [PATCH 05/10] Enhance MultiFixChainBenchmark and
SchemaLookupBenchmark with detailed documentation and improved
parameterization for better performance insights
---
.../core/MultiFixChainBenchmark.java | 204 +++++++++--
.../core/SchemaLookupBenchmark.java | 342 +++++++++++++++---
.../benchmarks/core/package-info.java | 90 +++++
3 files changed, 560 insertions(+), 76 deletions(-)
create mode 100644 aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
index 90818ff..e9f0129 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -47,13 +47,63 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark for multi-fix chain migration performance.
+ * JMH benchmark for chained DataFix application performance.
*
- *
- * <p>Measures the performance of applying multiple sequential fixes,
- * simulating real-world migration scenarios where data may need to
- * traverse many version upgrades.
+ *
+ * <p>Measures how fix chain length affects migration performance. This benchmark
+ * is essential for understanding the scalability characteristics of the DataFixer
+ * system when applying multiple sequential fixes.
+ *
+ *
+ * <h2>Benchmark Methods</h2>
+ *
+ *
+ * <ul>
+ * <li>{@link #renameChain} - Chain of homogeneous field rename operations</li>
+ * <li>{@link #mixedChain} - Chain of heterogeneous operations (renames, additions, transformations)</li>
+ * <li>{@link #partialChain} - Partial chain execution stopping at halfway version</li>
+ * </ul>
+ *
+ *
+ *
+ * <h2>Parameters</h2>
+ *
+ *
+ * <table>
+ * <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ * <tr><td>fixCount</td><td>1, 5, 10, 25, 50</td><td>Number of fixes in the chain</td></tr>
+ * <tr><td>payloadSize</td><td>SMALL, MEDIUM</td><td>Input data complexity</td></tr>
+ * </table>
+ *
+ *
+ *
+ * <h2>Benchmark Configuration</h2>
+ *
+ *
+ * <table>
+ * <tr><th>Setting</th><th>Value</th></tr>
+ * <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ * <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ * <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ * <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ * <tr><td>Time Unit</td><td>Microseconds</td></tr>
+ * </table>
+ *
+ *
+ *
+ * <h2>Interpreting Results</h2>
+ *
+ *
+ * <ul>
+ * <li>Linear scaling: Ideal behavior where time scales proportionally with fix count.</li>
+ * <li>Sub-linear scaling: Better than expected, indicates optimization opportunities being exploited.</li>
+ * <li>Super-linear scaling: Indicates potential performance issues with long chains.</li>
+ * <li>Error (±): 99.9% confidence interval. Larger values with more fixes may indicate GC pressure.</li>
+ * </ul>
+ *
+ *
+ *
+ * <h2>Usage</h2>
+ *
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar MultiFixChainBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar MultiFixChainBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific fix count and payload size
+ * java -jar benchmarks.jar MultiFixChainBenchmark -p fixCount=10 -p payloadSize=SMALL
+ *
+ * # Generate CSV output for analysis
+ * java -jar benchmarks.jar MultiFixChainBenchmark -rf csv -rff chain_results.csv
+ * }</pre>
*
* @author Erik Pförtner
+ * @see SingleFixBenchmark
+ * @see BenchmarkBootstrap#createChainFixer(int)
+ * @see BenchmarkBootstrap#createMixedFixer(int)
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -64,34 +114,130 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class MultiFixChainBenchmark {
+ /**
+ * The number of fixes in the chain, injected by JMH.
+ *
+ *
+ * <p>This parameter controls the length of the fix chain being benchmarked.
+ * Higher values test the system's ability to handle long migration paths
+ * efficiently.
+ *
+ *
+ *
+ * <ul>
+ * <li>1: Baseline single-fix performance (compare with {@link SingleFixBenchmark})</li>
+ * <li>5: Short chain typical of minor version updates</li>
+ * <li>10: Medium chain representing moderate version gaps</li>
+ * <li>25: Long chain simulating significant version jumps</li>
+ * <li>50: Stress test for extended migration paths</li>
+ * </ul>
+ *
+ */
@Param({"1", "5", "10", "25", "50"})
private int fixCount;
+ /**
+ * The payload size parameter, injected by JMH.
+ *
+ *
+ * <p>Controls the complexity of generated test data. Only SMALL and MEDIUM
+ * sizes are used to keep benchmark runtime reasonable while still capturing
+ * scaling behavior.
+ *
+ * @see PayloadSize
+ */
@Param({"SMALL", "MEDIUM"})
private PayloadSize payloadSize;
+ /**
+ * DataFixer configured with a chain of homogeneous field rename fixes.
+ *
+ * <p>Each fix in the chain performs a simple field rename operation (v{@code n} → v{@code n+1}).
+ * This represents the best-case scenario for chain execution.
+ */
private DataFixer chainFixer;
+
+ /**
+ * DataFixer configured with a chain of heterogeneous fix operations.
+ *
+ * <p>The chain includes a mix of rename, add, and transform operations to
+ * simulate realistic migration scenarios. Falls back to {@link #chainFixer}
+ * if mixed fixer creation fails.
+ */
private DataFixer mixedFixer;
+
+ /**
+ * Input data for migration benchmarks.
+ *
+ * <p>Regenerated at each iteration to ensure consistent GC behavior
+ * and avoid caching effects.
+ */
private Dynamic input;
+
+ /**
+ * Source version for migrations (always v1).
+ */
private DataVersion fromVersion;
+
+ /**
+ * Target version for full chain migrations (v{@link #fixCount} + 1).
+ */
private DataVersion toVersion;
+ /**
+ * Target version for partial chain migrations (approximately half of {@link #toVersion}).
+ *
+ * <p>Used by {@link #partialChain} to measure performance when only part
+ * of the available fixes are applied.
+ */
+ private DataVersion halfwayToVersion;
+
+ /**
+ * Initializes the benchmark state once per trial.
+ *
+ * <p>Creates the chain and mixed fixers based on the current {@link #fixCount}
+ * parameter. Also calculates the version bounds for full and partial chain
+ * execution.
+ *
+ * <p>If mixed fixer creation fails (e.g., due to unsupported operations),
+ * the chain fixer is used as a fallback to ensure the benchmark can still run.
+ */
@Setup(Level.Trial)
- public void setup() {
+ public void setupTrial() {
this.chainFixer = BenchmarkBootstrap.createChainFixer(this.fixCount);
- if (this.fixCount >= 4) {
+
+ try {
this.mixedFixer = BenchmarkBootstrap.createMixedFixer(this.fixCount);
+ } catch (final RuntimeException ex) {
+ this.mixedFixer = this.chainFixer;
}
- this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize);
+
this.fromVersion = new DataVersion(1);
this.toVersion = new DataVersion(this.fixCount + 1);
+
+ final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1);
+ this.halfwayToVersion = new DataVersion(halfwayVersion);
+ }
+
+ /**
+ * Regenerates input data at each iteration.
+ *
+ * <p>Fresh data generation per iteration ensures that:
+ * <ul>
+ * <li>GC behavior is consistent across iterations</li>
+ * <li>JIT optimizations don't over-specialize on specific data patterns</li>
+ * <li>Memory allocation patterns are representative of real usage</li>
+ * </ul>
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration() {
+ this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize);
}
/**
- * Benchmarks applying a chain of rename fixes.
+ * Benchmarks a chain of homogeneous field rename operations.
*
- * <p>All fixes in the chain perform the same operation type (rename),
- * measuring sequential fix application overhead.
+ * <p>Measures the performance of applying {@link #fixCount} sequential rename
+ * fixes to migrate data from v1 to v{@code fixCount+1}. This represents an
+ * optimistic scenario where all fixes perform the same lightweight operation.
+ *
+ * <p>Use this benchmark to establish baseline chain performance and detect
+ * any non-linear scaling behavior in the fix application pipeline.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -106,20 +252,24 @@ public void renameChain(final Blackhole blackhole) {
}
/**
- * Benchmarks applying a chain of mixed fix types.
+ * Benchmarks a chain of heterogeneous fix operations.
+ *
+ * <p>Measures the performance of applying {@link #fixCount} sequential fixes
+ * that include a mix of operations:
+ * <ul>
+ * <li>Field renames</li>
+ * <li>Field additions with default values</li>
+ * <li>Field transformations (type conversions, value mappings)</li>
+ * </ul>
*
- * <p>Includes rename, add, remove, and transform operations
- * for more realistic migration scenarios.
+ * <p>This benchmark provides a more realistic performance profile compared
+ * to {@link #renameChain}, as real-world migrations typically involve
+ * diverse operations.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void mixedChain(final Blackhole blackhole) {
- if (this.mixedFixer == null) {
- // Skip for fixCount < 4
- blackhole.consume(this.input);
- return;
- }
final Dynamic result = this.mixedFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
this.input,
@@ -129,21 +279,29 @@ public void mixedChain(final Blackhole blackhole) {
}
/**
- * Benchmarks partial migration (half the chain).
+ * Benchmarks partial chain execution stopping at the halfway version.
+ *
+ * <p>Measures the performance of applying only half of the available fixes
+ * in the chain. This simulates scenarios where:
+ * <ul>
+ * <li>Data is migrated incrementally rather than to the latest version</li>
+ * <li>Target version is not the most recent available</li>
+ * <li>Partial upgrades are performed for compatibility reasons</li>
+ * </ul>
*
- * <p>Measures performance when migrating to an intermediate version
- * rather than the latest version.
+ * <p>Comparing this benchmark with {@link #renameChain} reveals whether
+ * fix selection and version range calculations add significant overhead.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void partialChain(final Blackhole blackhole) {
- final int halfwayVersion = Math.max(2, (this.fixCount / 2) + 1);
final Dynamic result = this.chainFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
this.input,
this.fromVersion,
- new DataVersion(halfwayVersion));
+ this.halfwayToVersion
+ );
blackhole.consume(result);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
index d895b69..a8dbb42 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -41,17 +41,66 @@
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
-import java.util.Random;
+import java.util.SplittableRandom;
import java.util.concurrent.TimeUnit;
/**
* JMH benchmark for schema registry lookup performance.
*
- * <p>Measures the performance of {@link SchemaRegistry#get(DataVersion)}
- * with varying registry sizes. Uses floor semantics (finds greatest version
- * less than or equal to requested).
+ * <p>Measures the overhead of various schema lookup operations as registry size grows.
+ * Schema lookups are performed frequently during data migration, so their performance directly impacts overall
+ * migration throughput.
+ *
+ * <h2>Benchmark Methods</h2>
+ * <ul>
+ * <li>{@link #exactLookup} - Direct lookup by exact version match</li>
+ * <li>{@link #floorLookup} - Floor lookup finding closest version ≤ target</li>
+ * <li>{@link #latestLookup} - Retrieval of the most recent schema</li>
+ * <li>{@link #sequentialLookup} - Sequential traversal of all registered versions</li>
+ * </ul>
+ *
+ * <h2>Parameters</h2>
+ * <table>
+ * <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ * <tr><td>schemaCount</td><td>10, 50, 100, 500</td><td>Number of schemas in the registry</td></tr>
+ * </table>
+ *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ * <tr><th>Setting</th><th>Value</th></tr>
+ * <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ * <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ * <tr><td>Forks</td><td>2 (for statistical significance)</td></tr>
+ * <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ * <tr><td>Time Unit</td><td>Nanoseconds</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ * <ul>
+ * <li>O(1) lookups: {@link #exactLookup} and {@link #latestLookup} should show constant time regardless of registry size.</li>
+ * <li>O(log n) lookups: {@link #floorLookup} may show logarithmic scaling if implemented via binary search.</li>
+ * <li>O(n) lookups: {@link #sequentialLookup} should scale linearly with schema count.</li>
+ * <li>Cache effects: Larger registries may show increased lookup time due to CPU cache pressure.</li>
+ * </ul>
+ *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run only this benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark
+ *
+ * # Quick test with reduced iterations
+ * java -jar benchmarks.jar SchemaLookupBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Specific schema count only
+ * java -jar benchmarks.jar SchemaLookupBenchmark -p schemaCount=100
+ *
+ * # Run specific lookup benchmark
+ * java -jar benchmarks.jar SchemaLookupBenchmark.exactLookup
+ * }</pre>
*
* @author Erik Pförtner
+ * @see SchemaRegistry
+ * @see SimpleSchemaRegistry
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -62,87 +111,274 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class SchemaLookupBenchmark {
- @Param({"10", "50", "100", "500"})
- private int schemaCount;
-
- private SchemaRegistry registry;
- private DataVersion[] versions;
- private DataVersion[] lookupVersions;
- private Random random;
-
- @Setup(Level.Trial)
- public void setup() {
- // Create registry with specified number of schemas
- final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry();
- this.versions = new DataVersion[this.schemaCount];
-
- for (int i = 0; i < this.schemaCount; i++) {
- final int version = (i + 1) * 10; // 10, 20, 30, ...
- this.versions[i] = new DataVersion(version);
- simpleRegistry.register(MockSchemas.minimal(version));
- }
- simpleRegistry.freeze();
- this.registry = simpleRegistry;
-
- // Create lookup versions (including versions between registered versions)
- this.lookupVersions = new DataVersion[this.schemaCount * 2];
- for (int i = 0; i < this.lookupVersions.length; i++) {
- this.lookupVersions[i] = new DataVersion((i + 1) * 5); // 5, 10, 15, ...
- }
-
- this.random = new Random(42); // Fixed seed for reproducibility
- }
-
/**
- * Benchmarks looking up an exact registered version.
+ * Benchmarks exact version lookup performance.
+ *
+ * <p>Measures the time to retrieve a schema by its exact registered version.
+ * This is the most common lookup pattern during migration when the source version is known precisely.
+ *
+ * <p>The benchmark uses pre-generated random indices to avoid RNG overhead
+ * in the measurement loop. Each invocation looks up a different random version to prevent branch prediction
+ * optimization.
*
+ * @param s the shared benchmark state containing the registry and versions
+ * @param t the per-thread state providing random lookup indices
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void exactLookup(final Blackhole blackhole) {
- final int index = this.random.nextInt(this.schemaCount);
- final Schema schema = this.registry.get(this.versions[index]);
+ public void exactLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextExactIndex();
+ final Schema schema = s.registry.get(s.versions[index]);
blackhole.consume(schema);
}
/**
- * Benchmarks looking up a version using floor semantics.
+ * Benchmarks floor lookup performance.
*
- * <p>Half of the lookups will be for exact versions, half will
- * require floor resolution.
+ * <p>Measures the time to retrieve a schema using floor semantics, where
+ * the registry returns the schema with the highest version ≤ the requested version. This pattern is used when
+ * data may be at intermediate versions not explicitly registered.
*
+ * <p>The lookup versions include both exact matches (10, 20, 30, ...) and
+ * in-between values (5, 15, 25, ...) to exercise both fast-path exact matches and slower floor searches.
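+ *
+ * <p>Illustrative example of floor semantics, assuming the registry built in
+ * {@link BenchmarkState#setup()} (schemas registered at versions 10, 20, 30, ...):
+ * <pre>{@code
+ * // Exact match: version 20 is registered, so the v20 schema is returned.
+ * Schema exact = registry.get(new DataVersion(20));
+ *
+ * // Floor match: version 25 is not registered, so the registry returns the
+ * // v20 schema (the greatest registered version <= 25).
+ * Schema floored = registry.get(new DataVersion(25));
+ * }</pre>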
+ *
+ * @param s the shared benchmark state containing the registry and lookup versions
+ * @param t the per-thread state providing random lookup indices
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void floorLookup(final Blackhole blackhole) {
- final int index = this.random.nextInt(this.lookupVersions.length);
- final Schema schema = this.registry.get(this.lookupVersions[index]);
+ public void floorLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextFloorIndex();
+ final Schema schema = s.registry.get(s.lookupVersions[index]);
blackhole.consume(schema);
}
/**
- * Benchmarks getting the latest schema.
+ * Benchmarks latest schema retrieval performance.
+ *
+ * <p>Measures the time to retrieve the most recent schema from the registry.
+ * This operation should be O(1) as the latest schema is typically cached or stored in a dedicated field.
*
+ * <p>This benchmark serves as a baseline for the fastest possible lookup
+ * operation and helps identify any unexpected overhead in the registry implementation.
+ *
+ * @param s the shared benchmark state containing the registry
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void latestLookup(final Blackhole blackhole) {
- final Schema schema = this.registry.latest();
+ public void latestLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ final Schema schema = s.registry.latest();
blackhole.consume(schema);
}
/**
- * Benchmarks sequential lookup of all versions.
+ * Benchmarks sequential lookup of all registered schemas.
+ *
+ * <p>Measures the aggregate time to look up every schema in the registry
+ * in version order. This pattern occurs during schema validation, debugging, or when building migration path
+ * analyses.
*
- * <p>Measures cache-friendly access patterns.
+ * <p>Note: This benchmark performs multiple lookups per invocation
+ * ({@code schemaCount} lookups). The reported time is for the entire sequence, not per-lookup. Divide by
+ * {@code schemaCount} to get per-lookup overhead.
*
+ * @param s the shared benchmark state containing the registry and versions
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void sequentialLookup(final Blackhole blackhole) {
- for (final DataVersion version : this.versions) {
- final Schema schema = this.registry.get(version);
+ public void sequentialLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ for (final DataVersion version : s.versions) {
+ final Schema schema = s.registry.get(version);
blackhole.consume(schema);
}
}
+
+ /**
+ * Shared JMH state containing the schema registry and version arrays.
+ *
+ * <p>This state is shared across all threads within a benchmark trial
+ * ({@link Scope#Benchmark}). The registry is populated with mock schemas at versions 10, 20, 30, ... up to
+ * {@code schemaCount * 10}.
+ *
+ * <p>The registry is frozen after setup to match production usage patterns
+ * where registries are immutable during normal operation.
+ */
+ @State(Scope.Benchmark)
+ public static class BenchmarkState {
+
+ /**
+ * The number of schemas to register, injected by JMH.
+ *
+ * <p>Controls the size of the schema registry to measure lookup
+ * performance scaling:
+ * <ul>
+ * <li>10: Small registry, fits entirely in L1 cache</li>
+ * <li>50: Medium registry, typical for most applications</li>
+ * <li>100: Large registry, may exceed L1 cache</li>
+ * <li>500: Stress test for registry scalability</li>
+ * </ul>
+ */
+ @Param({"10", "50", "100", "500"})
+ public int schemaCount;
+
+ /**
+ * The frozen schema registry containing all registered schemas.
+ */
+ public SchemaRegistry registry;
+
+ /**
+ * Array of exact registered versions (10, 20, 30, ...).
+ *
+ * <p>Used by {@link #exactLookup} to ensure lookups always hit
+ * registered versions.
+ */
+ public DataVersion[] versions;
+
+ /**
+ * Array of lookup versions including in-between values (5, 10, 15, 20, ...).
+ *
+ * <p>Used by {@link #floorLookup} to exercise both exact matches
+ * and floor search behavior.
+ */
+ public DataVersion[] lookupVersions;
+
+ /**
+ * Initializes the schema registry and version arrays once per trial.
+ *
+ * <p>Creates a {@link SimpleSchemaRegistry} populated with minimal mock
+ * schemas at regular version intervals. The registry is frozen after population to enable any internal
+ * optimizations.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ final SimpleSchemaRegistry simpleRegistry = new SimpleSchemaRegistry();
+ this.versions = new DataVersion[this.schemaCount];
+
+ for (int i = 0; i < this.schemaCount; i++) {
+ final int version = (i + 1) * 10;
+ final DataVersion dataVersion = new DataVersion(version);
+ this.versions[i] = dataVersion;
+ simpleRegistry.register(MockSchemas.minimal(version));
+ }
+
+ simpleRegistry.freeze();
+ this.registry = simpleRegistry;
+
+ this.lookupVersions = new DataVersion[this.schemaCount * 2];
+ for (int i = 0; i < this.lookupVersions.length; i++) {
+ this.lookupVersions[i] = new DataVersion((i + 1) * 5);
+ }
+ }
+ }
+
+ /**
+ * Per-thread JMH state providing pre-generated random lookup indices.
+ *
+ * <p>Random number generation is expensive and would dominate the benchmark
+ * if performed in the hot path. This state pre-generates buffers of random indices during setup, allowing the
+ * benchmark methods to retrieve indices via simple array access and bit masking.
+ *
+ * <p>Each thread has its own state instance ({@link Scope#Thread}) to avoid
+ * contention on shared RNG state. The fixed seed ensures reproducible results across benchmark runs.
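+ *
+ * <p>Sketch of the wraparound strategy (illustrative; mirrors {@link #nextExactIndex()}):
+ * <pre>{@code
+ * // With a power-of-two buffer size, masking is equivalent to modulo
+ * // for non-negative cursors: cursor % 1024 == (cursor & 1023).
+ * int index = exactIndices[cursor++ & INDEX_MASK];
+ * }</pre>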
+ *
+ * @see BenchmarkState
+ */
+ @State(Scope.Thread)
+ public static class ThreadState {
+
+ /**
+ * Size of the pre-generated index buffer.
+ *
+ * <p>Power-of-two size enables cheap index wrapping via bit masking
+ * instead of modulo operation.
+ */
+ private static final int INDEX_BUFFER_SIZE = 1024;
+
+ /**
+ * Bit mask for wrapping cursor to buffer bounds ({@code INDEX_BUFFER_SIZE - 1}).
+ */
+ private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1;
+
+ /**
+ * Pre-generated indices into {@link BenchmarkState#versions}.
+ */
+ private final int[] exactIndices = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Pre-generated indices into {@link BenchmarkState#lookupVersions}.
+ */
+ private final int[] floorIndices = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Current position in {@link #exactIndices}.
+ */
+ private int exactCursor;
+
+ /**
+ * Current position in {@link #floorIndices}.
+ */
+ private int floorCursor;
+
+ /**
+ * Thread-local random number generator for index generation.
+ */
+ private SplittableRandom random;
+
+ /**
+ * Initializes the random number generator once per trial.
+ *
+ * <p>Uses a fixed seed (42) for reproducibility. Each thread gets its
+ * own {@link SplittableRandom} instance to avoid synchronization overhead.
+ */
+ @Setup(Level.Trial)
+ public void setupTrial() {
+ this.random = new SplittableRandom(42L);
+ }
+
+ /**
+ * Refills the index buffers at each iteration.
+ *
+ * <p>Generates fresh random indices based on the current
+ * {@link BenchmarkState#schemaCount} parameter. Resets cursors to the beginning of each buffer.
+ *
+ * @param s the shared benchmark state providing array bounds
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration(final BenchmarkState s) {
+ for (int i = 0; i < INDEX_BUFFER_SIZE; i++) {
+ this.exactIndices[i] = this.random.nextInt(s.versions.length);
+ this.floorIndices[i] = this.random.nextInt(s.lookupVersions.length);
+ }
+ this.exactCursor = 0;
+ this.floorCursor = 0;
+ }
+
+ /**
+ * Returns the next random index for exact version lookup.
+ *
+ * <p>Uses bit masking to wrap around the buffer efficiently.
+ *
+ * @return a random index into {@link BenchmarkState#versions}
+ */
+ public int nextExactIndex() {
+ return this.exactIndices[this.exactCursor++ & INDEX_MASK];
+ }
+
+ /**
+ * Returns the next random index for floor version lookup.
+ *
+ * <p>Uses bit masking to wrap around the buffer efficiently.
+ *
+ * @return a random index into {@link BenchmarkState#lookupVersions}
+ */
+ public int nextFloorIndex() {
+ return this.floorIndices[this.floorCursor++ & INDEX_MASK];
+ }
+ }
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
new file mode 100644
index 0000000..08423be
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Core JMH benchmarks for the Aether DataFixers framework.
+ *
+ * <p>This package contains benchmarks that measure the fundamental performance characteristics
+ * of the data fixer system, including fix application, chain execution, and schema registry
+ * operations. These benchmarks form the foundation for performance regression testing and
+ * optimization efforts.
+ *
+ * <ul>
+ * <li>Isolation: Each benchmark measures a single operation to isolate performance characteristics.</li>
+ * <li>Parameterization: Benchmarks are parameterized to capture scaling behavior across different input sizes.</li>
+ * <li>Reproducibility: Fixed seeds and deterministic data generation ensure reproducible results.</li>
+ * <li>JMH Best Practices: All benchmarks follow JMH guidelines including proper use of {@code Blackhole},
+ * state scoping, and setup level annotations.</li>
+ * </ul>
+ *
+ * <h2>Interpreting Results</h2>
+ * <p>All benchmarks in this package report both throughput (ops/time) and average time (time/op).
+ * When comparing results:
+ * <ul>
+ * <li>Compare measurements from the same JVM version and hardware</li>
+ * <li>Consider the 99.9% confidence interval (error bounds)</li>
+ * <li>Run multiple forks to account for JIT compilation variance</li>
+ * <li>Use baseline benchmarks (e.g., identity fix) to isolate framework overhead</li>
+ * </ul>
+ *
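+ * <p>Illustrative example: a throughput score of 2.0 ± 0.1 ops/µs corresponds to an average
+ * time of roughly 0.5 µs/op, since the two modes report approximately reciprocal views of the
+ * same measurement.
+ *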
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.core;
From 58d5f6c06618b2062dbf3e11254e0031a714d486 Mon Sep 17 00:00:00 2001
From: Erik
Date: Mon, 26 Jan 2026 22:17:47 +0100
Subject: [PATCH 06/10] Update copyright year to 2026 in benchmark files
---
.../aether/datafixers/benchmarks/BenchmarkRunner.java | 2 +-
.../datafixers/benchmarks/codec/CollectionCodecBenchmark.java | 2 +-
.../datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java | 3 +--
.../benchmarks/concurrent/ConcurrentMigrationBenchmark.java | 2 +-
.../datafixers/benchmarks/core/MultiFixChainBenchmark.java | 2 +-
.../datafixers/benchmarks/core/SchemaLookupBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/core/SingleFixBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/core/package-info.java | 2 +-
.../datafixers/benchmarks/format/CrossFormatBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/JsonBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/TomlXmlBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/YamlBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/util/BenchmarkBootstrap.java | 2 +-
.../datafixers/benchmarks/util/BenchmarkDataGenerator.java | 2 +-
.../aether/datafixers/benchmarks/util/PayloadSize.java | 2 +-
15 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java
index 4467845..d8f91e8 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/BenchmarkRunner.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
index 2167a2d..a55b729 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
index 82b3d04..ad44bd4 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@
package de.splatgames.aether.datafixers.benchmarks.codec;
import com.google.gson.JsonElement;
-import de.splatgames.aether.datafixers.api.codec.Codec;
import de.splatgames.aether.datafixers.api.codec.Codecs;
import de.splatgames.aether.datafixers.api.result.DataResult;
import de.splatgames.aether.datafixers.api.util.Pair;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
index f402cf8..3afb77f 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
index e9f0129..2b3e535 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
index a8dbb42..0b72395 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
index 2ff7c49..e60fd60 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
index 08423be..32b058f 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/package-info.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
index b725afa..0cd6961 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
index 545e222..5dcccb6 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
index 8d7107f..f618554 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
index 2959269..c387455 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java
index 64b89d4..38a13f3 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkBootstrap.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
index b20926e..7f48696 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
index 90376fa..82fe8a3 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2025 Splatgames.de Software and Contributors
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
From 28dc75834f336c2921a1697bea56eb085176df39 Mon Sep 17 00:00:00 2001
From: Erik
Date: Thu, 29 Jan 2026 21:26:16 +0100
Subject: [PATCH 07/10] Add concurrent benchmarking utilities and comprehensive
Javadoc for `ConcurrentMigrationBenchmark` to enhance multithreaded
performance analysis.
---
.../ConcurrentMigrationBenchmark.java | 566 +++++++++++++++---
.../benchmarks/concurrent/package-info.java | 130 ++++
2 files changed, 604 insertions(+), 92 deletions(-)
create mode 100644 aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
index 3afb77f..a1830bf 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -49,16 +49,106 @@
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
-import java.util.Random;
+import java.util.SplittableRandom;
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark for concurrent migration and registry access performance.
+ * JMH benchmark for concurrent DataFixer operations and thread-safety validation.
*
- * <p>Measures the thread-safety and contention characteristics of the
- * DataFixer and SchemaRegistry under concurrent load.
+ * <p>This benchmark measures the performance characteristics of the DataFixer system
+ * under concurrent load. It validates thread-safety of shared components and quantifies
+ * scalability across different thread counts. The results help identify contention
+ * points and ensure the framework performs well in multi-threaded environments.
+ *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Concurrent Migration Benchmarks</h3>
+ * <p>Measure DataFixer performance when multiple threads perform migrations simultaneously:
+ * <ul>
+ * <li>{@link #concurrentSingleFix} - Maximum parallelism with single-fix migrations</li>
+ * <li>{@link #concurrentChainMigration} - Maximum parallelism with 10-fix chain migrations</li>
+ * <li>{@link #fourThreadMigration} - Fixed 4-thread migration for baseline comparison</li>
+ * <li>{@link #eightThreadMigration} - Fixed 8-thread migration for scaling analysis</li>
+ * </ul>
+ *
+ * <h3>Concurrent Registry Access Benchmarks</h3>
+ * <p>Measure SchemaRegistry performance under concurrent read pressure:
+ * <ul>
+ * <li>{@link #concurrentRegistryLookup} - Random version lookups from multiple threads</li>
+ * <li>{@link #concurrentLatestLookup} - Repeated retrieval of the latest schema from multiple threads</li>
+ * </ul>
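+ *
+ * <p>Illustrative invocation, mirroring the usage shown for the other benchmarks in this module:
+ * <pre>{@code
+ * # Run only the concurrent benchmarks with the small payload
+ * java -jar benchmarks.jar ConcurrentMigrationBenchmark -p payloadSize=SMALL
+ * }</pre>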
*
* @author Erik Pförtner
+ * @see de.splatgames.aether.datafixers.benchmarks.core.SingleFixBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.core.MultiFixChainBenchmark
+ * @see BenchmarkBootstrap
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -69,151 +159,443 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class ConcurrentMigrationBenchmark {
- @Param({"SMALL", "MEDIUM"})
- private PayloadSize payloadSize;
-
- // Shared state across threads
- private DataFixer sharedFixer;
- private DataFixer sharedChainFixer;
- private SchemaRegistry sharedRegistry;
- private DataVersion fromVersion;
- private DataVersion toVersion;
- private DataVersion chainToVersion;
- private DataVersion[] registryVersions;
-
- @Setup(Level.Trial)
- public void setup() {
- // Create shared fixer (thread-safe after freeze)
- this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer();
- this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10);
- this.fromVersion = new DataVersion(1);
- this.toVersion = new DataVersion(2);
- this.chainToVersion = new DataVersion(11);
-
- // Create shared registry
- final SimpleSchemaRegistry registry = new SimpleSchemaRegistry();
- this.registryVersions = new DataVersion[100];
- for (int i = 0; i < 100; i++) {
- final int version = (i + 1) * 10;
- this.registryVersions[i] = new DataVersion(version);
- registry.register(MockSchemas.minimal(version));
- }
- registry.freeze();
- this.sharedRegistry = registry;
- }
-
- /**
- * Per-thread state for independent test data.
- */
- @State(Scope.Thread)
- public static class ThreadState {
-
- private Dynamic threadInput;
- private Random random;
-
- @Setup(Level.Iteration)
- public void setup(final ConcurrentMigrationBenchmark parent) {
- // Each thread gets its own input data
- this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, parent.payloadSize);
- this.random = new Random();
- }
- }
-
- // ==================== Concurrent Migration ====================
+ // ==================== Concurrent Migration Benchmarks ====================
/**
- * Benchmarks concurrent single-fix migrations using all available processors.
+ * Benchmarks concurrent single-fix migrations with maximum thread parallelism.
*
- * @param state per-thread state
+ *
+ * <p>All available CPU threads simultaneously apply a single DataFix to their
+ * respective input data. This benchmark stress-tests the thread-safety of the
+ * DataFixer implementation and measures maximum achievable throughput.
+ *
+ * <p>Key aspects measured:
+ * <ul>
+ * <li>Lock contention in shared DataFixer instance</li>
+ * <li>Memory allocation pressure under concurrent load</li>
+ * <li>Cache coherency effects from shared schema access</li>
+ * </ul>
+ *
+ * @param s shared benchmark state containing the DataFixer and versions
+ * @param t per-thread state containing isolated input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(Threads.MAX)
- public void concurrentSingleFix(final ThreadState state, final Blackhole blackhole) {
- final Dynamic result = this.sharedFixer.update(
+ public void concurrentSingleFix(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic result = s.sharedFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
- state.threadInput,
- this.fromVersion,
- this.toVersion);
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks concurrent chain migrations using all available processors.
+ * Benchmarks concurrent chain migrations with maximum thread parallelism.
+ *
+ * <p>All available CPU threads simultaneously apply a 10-fix chain migration.
+ * This benchmark combines the stress of concurrent access with the complexity
+ * of multi-step migrations, revealing performance characteristics under
+ * realistic high-load scenarios.
*
- * @param state per-thread state
+ * <p>Compared to {@link #concurrentSingleFix}, this benchmark:
+ * <ul>
+ * <li>Exercises fix ordering and version traversal logic concurrently</li>
+ * <li>Creates higher memory allocation rates per thread</li>
+ * </ul>
+ *
+ * @param s shared benchmark state containing the chain DataFixer
+ * @param t per-thread state containing isolated input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(Threads.MAX)
- public void concurrentChainMigration(final ThreadState state, final Blackhole blackhole) {
- final Dynamic result = this.sharedChainFixer.update(
+ public void concurrentChainMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic result = s.sharedChainFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
- state.threadInput,
- this.fromVersion,
- this.chainToVersion);
+ t.threadInput,
+ s.fromVersion,
+ s.chainToVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks concurrent migrations with 4 threads.
+ * Benchmarks migration performance with exactly 4 concurrent threads.
+ *
+ * <p>Provides a fixed-thread baseline for comparing against variable-thread
+ * benchmarks. Four threads represent a typical server core count and help
+ * establish scaling characteristics between single-threaded and maximum
+ * parallelism scenarios.
*
- * @param state per-thread state
+ * <p>Use this benchmark to:
+ * <ul>
+ * <li>Establish baseline concurrent performance on quad-core systems</li>
+ * <li>Compare with {@link #eightThreadMigration} to measure scaling factor</li>
+ * <li>Identify the point where adding threads provides diminishing returns</li>
+ * </ul>
+ *
+ * @param s shared benchmark state containing the DataFixer
+ * @param t per-thread state containing isolated input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(4)
- public void fourThreadMigration(final ThreadState state, final Blackhole blackhole) {
- final Dynamic result = this.sharedFixer.update(
+ public void fourThreadMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic result = s.sharedFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
- state.threadInput,
- this.fromVersion,
- this.toVersion);
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks concurrent migrations with 8 threads.
+ * Benchmarks migration performance with exactly 8 concurrent threads.
+ *
+ * <p>Tests scaling beyond the 4-thread baseline. Eight threads represent
+ * a common server configuration and help identify whether the DataFixer
+ * implementation scales efficiently with additional parallelism.
+ *
+ * <p>Scaling analysis:
+ * <ul>
+ * <li>2x throughput vs 4 threads: Perfect linear scaling</li>
+ * <li>1.5-2x throughput: Good scaling with minor contention</li>
+ * <li>&lt;1.5x throughput: Contention limiting scalability</li>
+ * <li>≤1x throughput: Severe contention; investigate locking</li>
+ * </ul>
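+ *
+ * <p>Illustrative calculation (made-up numbers): if {@link #fourThreadMigration} reports
+ * 120 ops/ms and this benchmark reports 210 ops/ms, the scaling factor is 210 / 120 = 1.75x,
+ * which falls into the "good scaling with minor contention" band above.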
*
- * @param state per-thread state
+ * @param s shared benchmark state containing the DataFixer
+ * @param t per-thread state containing isolated input data
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(8)
- public void eightThreadMigration(final ThreadState state, final Blackhole blackhole) {
- final Dynamic result = this.sharedFixer.update(
+ public void eightThreadMigration(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final Dynamic result = s.sharedFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
- state.threadInput,
- this.fromVersion,
- this.toVersion);
+ t.threadInput,
+ s.fromVersion,
+ s.toVersion
+ );
blackhole.consume(result);
}
- // ==================== Concurrent Registry Access ====================
+ // ==================== Concurrent Registry Access Benchmarks ====================
/**
- * Benchmarks concurrent schema registry lookups.
+ * Benchmarks concurrent random schema lookups from the registry.
+ *
+ * <p>All available threads perform random version lookups against a shared
+ * {@link SchemaRegistry} containing 100 schema versions. This benchmark
+ * validates the thread-safety and performance of registry read operations
+ * under heavy concurrent access.
+ *
+ * <p>The benchmark uses pre-computed random indices (via {@link ThreadState#nextRegistryIndex()})
+ * to avoid RNG contention affecting measurements. Each thread cycles through
+ * a 1024-element buffer of random indices.
*
- * @param state per-thread state
+ * <p>Performance expectations:
+ * <ul>
+ * <li>Registry lookups should be lock-free and scale linearly</li>
+ * <li>Cache effects may cause variance based on version access patterns</li>
+ * <li>No write contention since registry is frozen before benchmarking</li>
+ * </ul>
+ *
+ * @param s shared benchmark state containing the registry and versions
+ * @param t per-thread state providing random index sequence
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(Threads.MAX)
- public void concurrentRegistryLookup(final ThreadState state, final Blackhole blackhole) {
- final int index = state.random.nextInt(this.registryVersions.length);
- final Schema schema = this.sharedRegistry.get(this.registryVersions[index]);
+ public void concurrentRegistryLookup(final BenchmarkState s,
+ final ThreadState t,
+ final Blackhole blackhole) {
+ final int index = t.nextRegistryIndex();
+ final Schema schema = s.sharedRegistry.get(s.registryVersions[index]);
blackhole.consume(schema);
}
/**
- * Benchmarks concurrent latest schema access.
+ * Benchmarks concurrent latest-schema lookups from the registry.
+ *
+ * <p>All available threads repeatedly call {@link SchemaRegistry#latest()}
+ * on a shared registry. This represents the "hot path" optimization where
+ * applications frequently need the most recent schema version.
+ *
+ * <p>This benchmark helps validate:
+ * <ul>
+ * <li>Caching effectiveness for the latest schema reference</li>
+ * <li>Memory visibility of the cached latest schema across threads</li>
+ * <li>Absence of unnecessary synchronization on read-only access</li>
+ * </ul>
*
+ * <p>Expected to outperform {@link #concurrentRegistryLookup} due to:
+ * <ul>
+ * <li>No version-to-schema map lookup required</li>
+ * <li>Single cached reference rather than computed lookup</li>
+ * <li>Better CPU cache utilization from accessing same memory location</li>
+ * </ul>
+ *
+ * @param s shared benchmark state containing the registry
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
@Threads(Threads.MAX)
- public void concurrentLatestLookup(final Blackhole blackhole) {
- final Schema schema = this.sharedRegistry.latest();
+ public void concurrentLatestLookup(final BenchmarkState s,
+ final Blackhole blackhole) {
+ final Schema schema = s.sharedRegistry.latest();
blackhole.consume(schema);
}
+
+ // ==================== State Classes ====================
+
+ /**
+ * Shared benchmark state accessible by all threads.
+ *
+ * <p>This state class contains all resources that are shared across benchmark
+ * threads, simulating real-world scenarios where a single DataFixer instance
+ * serves multiple concurrent requests.
+ *
+ * <p>State initialization occurs once per trial (before warmup begins) to
+ * ensure consistent starting conditions across all measurement iterations.
+ *
+ * <h2>Shared Resources</h2>
+ * <ul>
+ * <li>{@link #sharedFixer} - Single-fix DataFixer for basic migration benchmarks</li>
+ * <li>{@link #sharedChainFixer} - 10-fix chain DataFixer for chain migration benchmarks</li>
+ * <li>{@link #sharedRegistry} - Frozen SchemaRegistry with 100 versions for lookup benchmarks</li>
+ * <li>Version constants - Pre-computed DataVersion instances to avoid allocation during measurement</li>
+ * </ul>
+ */
+ @State(Scope.Benchmark)
+ public static class BenchmarkState {
+
+ /**
+ * The payload size parameter, injected by JMH.
+ *
+ * <p>Controls the complexity of generated test data for each thread.
+ * Only SMALL and MEDIUM sizes are used to balance benchmark runtime
+ * with meaningful performance differentiation.
+ *
+ * @see PayloadSize
+ */
+ @Param({"SMALL", "MEDIUM"})
+ public PayloadSize payloadSize;
+
+ /**
+ * Shared DataFixer configured with a single fix (v1 → v2).
+ *
+ * <p>Used by migration benchmarks that measure basic concurrent
+ * fix application without chain traversal overhead.
+ */
+ public DataFixer sharedFixer;
+
+ /**
+ * Shared DataFixer configured with a 10-fix chain (v1 → v11).
+ *
+ * <p>Used by {@link #concurrentChainMigration} to measure concurrent
+ * performance when applying multiple sequential fixes.
+ */
+ public DataFixer sharedChainFixer;
+
+ /**
+ * Shared schema registry containing 100 registered schema versions.
+ *
+ * <p>The registry is frozen after population to ensure thread-safe
+ * read access during benchmarks. Versions range from 10 to 1000
+ * in increments of 10.
+ */
+ public SchemaRegistry sharedRegistry;
+
+ /**
+ * Source version for all migrations (v1).
+ */
+ public DataVersion fromVersion;
+
+ /**
+ * Target version for single-fix migrations (v2).
+ */
+ public DataVersion toVersion;
+
+ /**
+ * Target version for chain migrations (v11).
+ */
+ public DataVersion chainToVersion;
+
+ /**
+ * Pre-computed DataVersion array for registry lookup benchmarks.
+ *
+ * <p>Contains 100 versions (10, 20, 30, ..., 1000) matching the
+ * schemas registered in {@link #sharedRegistry}. Pre-allocation
+ * avoids DataVersion object creation during measurement.
+ */
+ public DataVersion[] registryVersions;
+
+ /**
+ * Initializes all shared benchmark state.
+ *
+ * <p>Creates DataFixer instances, populates the SchemaRegistry with
+ * 100 versions, and pre-computes all version constants. The registry
+ * is frozen after population to enable lock-free concurrent reads.
+ */
+ @Setup(Level.Trial)
+ public void setup() {
+ this.sharedFixer = BenchmarkBootstrap.createSingleFixFixer();
+ this.sharedChainFixer = BenchmarkBootstrap.createChainFixer(10);
+
+ this.fromVersion = new DataVersion(1);
+ this.toVersion = new DataVersion(2);
+ this.chainToVersion = new DataVersion(11);
+
+ final SimpleSchemaRegistry registry = new SimpleSchemaRegistry();
+ this.registryVersions = new DataVersion[100];
+ for (int i = 0; i < 100; i++) {
+ final int version = (i + 1) * 10;
+ this.registryVersions[i] = new DataVersion(version);
+ registry.register(MockSchemas.minimal(version));
+ }
+ registry.freeze();
+ this.sharedRegistry = registry;
+ }
+ }
+
+ /**
+ * Per-thread benchmark state for isolated data and random access patterns.
+ *
+ * <p>This state class provides each benchmark thread with its own input data
+ * and random number generator to eliminate false sharing and contention on
+ * thread-local operations.
+ *
+ * <h2>Design Rationale</h2>
+ * <ul>
+ * <li>Thread-local input: Each thread operates on its own Dynamic instance,
+ * preventing write contention and ensuring independent GC behavior</li>
+ * <li>SplittableRandom: Faster and contention-free compared to
+ * {@link java.util.Random} which uses atomic CAS operations</li>
+ * <li>Pre-computed indices: Random registry indices are generated during
+ * setup to avoid RNG overhead during measurement</li>
+ * </ul>
+ *
+ * <h2>Index Buffer Strategy</h2>
+ * <p>The {@link #registryIndexBuffer} uses a power-of-two size (1024) with
+ * bitwise AND masking for efficient wraparound without modulo operations.
+ * This provides pseudo-random access patterns while minimizing measurement
+ * overhead.
+ */
+ @State(Scope.Thread)
+ public static class ThreadState {
+
+ /**
+ * Size of the pre-computed random index buffer.
+ *
+ * <p>Power of two (1024) enables efficient wraparound via bitwise AND.
+ * Large enough to avoid pattern repetition affecting cache behavior
+ * during typical measurement windows.
+ */
+ private static final int INDEX_BUFFER_SIZE = 1024;
+
+ /**
+ * Bitmask for efficient modulo operation on buffer index.
+ *
+ * <p>Used as {@code cursor & INDEX_MASK} instead of {@code cursor % INDEX_BUFFER_SIZE}
+ * for faster wraparound calculation.
+ */
+ private static final int INDEX_MASK = INDEX_BUFFER_SIZE - 1;
+
+ /**
+ * Pre-computed random indices for registry lookup benchmarks.
+ *
+ * <p>Populated during iteration setup with random values in range
+ * [0, registryVersions.length). Accessed via {@link #nextRegistryIndex()}.
+ */
+ private final int[] registryIndexBuffer = new int[INDEX_BUFFER_SIZE];
+
+ /**
+ * Per-thread input data for migration benchmarks.
+ *
+ * <p>Regenerated at each iteration to ensure consistent memory allocation
+ * patterns and prevent cross-iteration caching effects.
+ */
+ public Dynamic threadInput;
+
+ /**
+ * Current position in the {@link #registryIndexBuffer}.
+ *
+ * <p>Incremented on each call to {@link #nextRegistryIndex()} and
+ * wrapped using {@link #INDEX_MASK}.
+ */
+ private int registryCursor;
+
+ /**
+ * Per-thread random number generator.
+ *
+ * <p>{@link SplittableRandom} is used instead of {@link java.util.Random}
+ * because it is faster and does not use atomic operations, eliminating
+ * contention when multiple threads generate random numbers.
+ */
+ private SplittableRandom random;
+
+ /**
+ * Initializes the per-thread random number generator.
+ *
+ * <p>Called once per trial. Uses a fixed seed (42) for reproducibility
+ * across benchmark runs, though each thread will produce different
+ * sequences due to {@link SplittableRandom}'s splittable nature.
+ */
+ @Setup(Level.Trial)
+ public void setupTrial() {
+ // Per-thread RNG avoids contention and is faster than java.util.Random.
+ this.random = new SplittableRandom(42L);
+ }
+
+ /**
+ * Regenerates input data and random indices for each iteration.
+ *
+ * <p>Fresh data generation per iteration ensures:
+ * <ul>
+ * <li>Consistent GC pressure across iterations</li>
+ * <li>No JIT over-optimization on specific data patterns</li>
+ * <li>Independent memory allocation per thread</li>
+ * </ul>
+ *
+ * <p>The random index buffer is refilled with new random values to
+ * vary the registry access pattern across iterations.
+ *
+ * @param s the shared benchmark state providing payload size and version array
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration(final BenchmarkState s) {
+ this.threadInput = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, s.payloadSize);
+
+ for (int i = 0; i < INDEX_BUFFER_SIZE; i++) {
+ this.registryIndexBuffer[i] = this.random.nextInt(s.registryVersions.length);
+ }
+ this.registryCursor = 0;
+ }
+
+ /**
+ * Returns the next pre-computed random index for registry lookups.
+ *
+ * <p>Retrieves the next value from {@link #registryIndexBuffer} and
+ * advances the cursor with efficient bitwise wraparound. This method
+ * is called during measurement and is optimized to minimize overhead.
+ *
+ * @return a random index in range [0, registryVersions.length)
+ */
+ public int nextRegistryIndex() {
+ return this.registryIndexBuffer[this.registryCursor++ & INDEX_MASK];
+ }
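+
+ // Illustrative only (not part of this change): a benchmark method would typically
+ // combine the shared and per-thread state roughly like
+ //
+ //     @Benchmark
+ //     public void randomRegistryLookup(final BenchmarkState shared, final ThreadState ts, final Blackhole bh) {
+ //         bh.consume(shared.registryVersions[ts.nextRegistryIndex()]);
+ //     }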
+ }
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java
new file mode 100644
index 0000000..9b374ee
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/package-info.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Concurrency-focused JMH benchmarks for the Aether DataFixers framework.
+ *
+ *
+ * <p>This package contains benchmarks that measure performance characteristics under
+ * concurrent load. These benchmarks validate thread-safety of the DataFixer system,
+ * identify contention points, and quantify scalability across different thread counts.
+ *
+ * <p>Single-threaded benchmarks measure raw operation performance, but real-world
+ * applications often use the DataFixer system from multiple threads simultaneously.
+ * Concurrent benchmarks reveal:
+ *
+ * <ul>
+ *     <li><b>Lock contention:</b> Synchronization overhead in shared components</li>
+ *     <li><b>Cache coherency effects:</b> Performance impact of shared data access</li>
+ *     <li><b>Scalability limits:</b> Point at which adding threads stops improving throughput</li>
+ *     <li><b>Thread-safety validation:</b> Correctness under concurrent access</li>
+ * </ul>
+ *
+ * <p>The {@link de.splatgames.aether.datafixers.benchmarks.core core} package
+ * measures single-threaded baseline performance. Use concurrent benchmarks to:
+ *
+ * <ul>
+ *     <li>Calculate concurrency overhead: {@code (single-threaded throughput × N threads) / actual throughput}</li>
+ *     <li>Identify scaling efficiency: {@code actual throughput / (single-threaded throughput × N threads)}</li>
+ *     <li>Detect regression: Compare concurrent results across code changes</li>
+ * </ul>
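+ *
+ * <p>Worked example (illustrative numbers only): if one thread sustains 100 ops/ms and four
+ * threads together sustain 300 ops/ms, scaling efficiency is
+ * {@code 300 / (100 * 4) = 0.75}, i.e. 75% of ideal linear scaling.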
- * <p>This class provides a convenient way to run benchmarks programmatically
- * with default settings optimized for comprehensive performance analysis.
+ * <p>This class provides both a command-line interface and programmatic API for
+ * executing benchmarks. It supports all standard JMH options while providing
+ * convenient preset configurations for common benchmark scenarios.
 *
- * <h2>Usage</h2>
+ * <h2>Execution Methods</h2>
 *
- * <h3>Via exec:java (Quick Development Runs)</h3>
+ * <h3>Via Maven exec:java (Development)</h3>
+ * <p>Quick way to run benchmarks during development without building a JAR:
 *
 * <pre>{@code
* # Run all benchmarks with default settings
* mvn exec:java -pl aether-datafixers-benchmarks
*
* # Run with JMH arguments
* mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="-h"
+ *
+ * # Run specific benchmark pattern
+ * mvn exec:java -pl aether-datafixers-benchmarks -Dexec.args="SingleFixBenchmark"
 * }</pre>
*
- * <h3>Via Fat JAR (Production Runs)</h3>
+ * <h3>Via Fat JAR (Production)</h3>
+ * <p>Recommended for production benchmark runs with full JMH isolation:
*
 * <pre>{@code
* # Build the fat JAR
* mvn clean package -pl aether-datafixers-benchmarks -DskipTests
@@ -60,47 +66,115 @@
* # Run with custom parameters
* java -jar target/*-benchmarks.jar -p payloadSize=LARGE -wi 3 -i 5 -f 1
*
- * # Output JSON results
+ * # Output JSON results for analysis
* java -jar target/*-benchmarks.jar -rf json -rff results.json
*
* # List all available benchmarks
* java -jar target/*-benchmarks.jar -l
+ *
+ * # Profile with async-profiler
+ * java -jar target/*-benchmarks.jar -prof async:output=flamegraph
 * }</pre>
+ * <p>For integration with test frameworks or custom tooling:
+ *
+ * <pre>{@code
+ * // Run all benchmarks
+ * BenchmarkRunner.runAllBenchmarks();
+ *
+ * // Run quick validation (CI/CD)
+ * BenchmarkRunner.runQuickBenchmarks();
+ *
+ * // Run only core benchmarks
+ * BenchmarkRunner.runCoreBenchmarks();
+ *
+ * // Run only format benchmarks
+ * BenchmarkRunner.runFormatBenchmarks();
+ * }</pre>
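+ *
+ * <p>Custom configurations can be assembled the same way the preset methods below do
+ * (sketch using JMH's {@code OptionsBuilder}; the include pattern is only an example):
+ * <pre>{@code
+ * Options options = new OptionsBuilder()
+ *         .include("de\\.splatgames\\.aether\\.datafixers\\.benchmarks\\.core\\..*")
+ *         .warmupIterations(3)
+ *         .measurementIterations(5)
+ *         .forks(1)
+ *         .build();
+ * new Runner(options).run();
+ * }</pre>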
 *
 * <h2>Default Configuration</h2>
- *
- * <ul>
- *     <li>Warmup: 5 iterations, 1 second each</li>
- *     <li>Measurement: 10 iterations, 1 second each</li>
- *     <li>Forks: 2 (for statistical significance)</li>
- *     <li>JVM heap: 2GB min/max</li>
- * </ul>
+ *
+ * <table>
+ *     <tr><th>Setting</th><th>Default</th><th>Quick Mode</th></tr>
+ *     <tr><td>Warmup iterations</td><td>5</td><td>2</td></tr>
+ *     <tr><td>Measurement iterations</td><td>10</td><td>3</td></tr>
+ *     <tr><td>Forks</td><td>2</td><td>1</td></tr>
+ *     <tr><td>JVM heap</td><td>2 GB</td><td>1 GB</td></tr>
+ * </table>
+ *
+ * <h2>Common JMH Options</h2>
+ *
+ * <table>
+ *     <tr><th>Option</th><th>Description</th><th>Example</th></tr>
+ *     <tr><td>{@code -wi}</td><td>Warmup iterations</td><td>{@code -wi 3}</td></tr>
+ *     <tr><td>{@code -i}</td><td>Measurement iterations</td><td>{@code -i 5}</td></tr>
+ *     <tr><td>{@code -f}</td><td>Number of forks</td><td>{@code -f 1}</td></tr>
+ *     <tr><td>{@code -p}</td><td>Parameter value</td><td>{@code -p payloadSize=SMALL}</td></tr>
+ *     <tr><td>{@code -t}</td><td>Thread count</td><td>{@code -t 4}</td></tr>
+ *     <tr><td>{@code -rf}</td><td>Result format</td><td>{@code -rf json}</td></tr>
+ *     <tr><td>{@code -rff}</td><td>Result file</td><td>{@code -rff results.json}</td></tr>
+ *     <tr><td>{@code -l}</td><td>List benchmarks</td><td>{@code -l}</td></tr>
+ *     <tr><td>{@code -prof}</td><td>Profiler</td><td>{@code -prof gc}</td></tr>
+ * </table>
*
* @author Erik Pförtner
+ * @see de.splatgames.aether.datafixers.benchmarks.core
+ * @see de.splatgames.aether.datafixers.benchmarks.codec
+ * @see de.splatgames.aether.datafixers.benchmarks.concurrent
* @since 1.0.0
*/
public final class BenchmarkRunner {
+ /**
+ * Private constructor to prevent instantiation.
+ */
private BenchmarkRunner() {
// Main class
}
 /**
- * Main entry point for running benchmarks.
+ * Main entry point for running benchmarks from the command line.
+ *
+ * <p>Behavior depends on whether arguments are provided:
+ *
+ * <ul>
+ *     <li><b>With arguments:</b> Delegates to JMH's main method, supporting all
+ *     standard JMH command-line options</li>
+ *     <li><b>Without arguments:</b> Runs all benchmarks using default configuration
+ *     via {@link #runAllBenchmarks()}</li>
+ * </ul>
 *
- * <p>When run without arguments, executes all benchmarks in the package.
- * Supports all standard JMH command-line arguments.
+ * <h2>Exit Codes</h2>
+ * <ul>
+ *     <li>0 - Successful completion</li>
+ *     <li>Non-zero - Error during benchmark execution</li>
+ * </ul>
 *
- * @param args command-line arguments (passed to JMH)
+ * @param args command-line arguments (passed directly to JMH if present)
 * @throws RunnerException if benchmark execution fails
- * @throws IOException if there is an I/O error
+ * @throws IOException if there is an I/O error reading benchmark metadata
 */
public static void main(final String[] args) throws RunnerException, IOException {
if (args.length > 0) {
@@ -113,9 +187,28 @@ public static void main(final String[] args) throws RunnerException, IOException
}
/**
- * Runs all benchmarks with default configuration.
+ * Runs all benchmarks in the benchmarks package with default configuration.
+ *
+ * <p>Executes every benchmark class in
+ * {@code de.splatgames.aether.datafixers.benchmarks.*} with production-quality
+ * settings suitable for reliable performance measurements.
+ *
+ * <h2>Configuration</h2>
+ * <ul>
+ *     <li>Warmup: 5 iterations</li>
+ *     <li>Measurement: 10 iterations</li>
+ *     <li>Forks: 2 (for JIT variance mitigation)</li>
+ *     <li>JVM heap: 2 GB min/max</li>
+ * </ul>
+ *
+ * <p><b>Note:</b> Running all benchmarks can take significant time depending
+ * on the number of parameter combinations. Consider using
+ * {@link #runQuickBenchmarks()} for validation or {@link #runCoreBenchmarks()}
+ * for focused testing.
*
* @throws RunnerException if benchmark execution fails
+ * @see #runQuickBenchmarks()
+ * @see #runCoreBenchmarks()
*/
public static void runAllBenchmarks() throws RunnerException {
final Options options = new OptionsBuilder()
@@ -130,11 +223,31 @@ public static void runAllBenchmarks() throws RunnerException {
}
/**
- * Runs a quick subset of benchmarks for validation.
+ * Runs a quick subset of benchmarks for fast validation.
*
- * <p>Useful for CI/CD pipelines or quick sanity checks.
+ * <p>Executes only the {@code SingleFixBenchmark} with minimal iterations,
+ * suitable for:
+ *
+ * <ul>
+ *     <li>CI/CD pipeline smoke tests</li>
+ *     <li>Quick sanity checks during development</li>
+ *     <li>Verifying benchmark infrastructure works correctly</li>
+ * </ul>
+ *
+ * <h2>Configuration</h2>
+ * <ul>
+ *     <li>Benchmark: SingleFixBenchmark only</li>
+ *     <li>Warmup: 2 iterations</li>
+ *     <li>Measurement: 3 iterations</li>
+ *     <li>Forks: 1 (faster but less statistically robust)</li>
+ *     <li>JVM heap: 1 GB min/max</li>
+ *     <li>Payload size: SMALL only</li>
+ * </ul>
+ *
+ * <p><b>Warning:</b> Results from quick benchmarks should not be used for
+ * performance comparisons due to reduced statistical rigor.
*
* @throws RunnerException if benchmark execution fails
+ * @see #runAllBenchmarks()
*/
public static void runQuickBenchmarks() throws RunnerException {
final Options options = new OptionsBuilder()
@@ -150,9 +263,30 @@ public static void runQuickBenchmarks() throws RunnerException {
}
/**
- * Runs core migration benchmarks only.
+ * Runs only the core migration benchmarks.
+ *
+ * <p>Executes benchmarks in the {@code core} package that measure DataFixer
+ * migration performance:
+ *
+ * <ul>
+ *     <li>{@code SingleFixBenchmark} - Single fix application performance</li>
+ * </ul>
+ *
+ * <p>Use this method when focusing on migration performance without
+ * format-specific or codec overhead considerations.
*
* @throws RunnerException if benchmark execution fails
+ * @see #runFormatBenchmarks()
+ * @see #runAllBenchmarks()
*/
public static void runCoreBenchmarks() throws RunnerException {
final Options options = new OptionsBuilder()
@@ -167,9 +301,31 @@ public static void runCoreBenchmarks() throws RunnerException {
}
/**
- * Runs format comparison benchmarks only.
+ * Runs only the format comparison benchmarks.
+ *
+ * <p>Executes benchmarks in the {@code format} package that compare different
+ * DynamicOps implementations:
+ *
+ * <ul>
+ *     <li>{@code JsonBenchmark} - GsonOps vs JacksonJsonOps</li>
+ *     <li>{@code YamlBenchmark} - SnakeYamlOps vs JacksonYamlOps</li>
+ *     <li>{@code TomlXmlBenchmark} - JacksonTomlOps and JacksonXmlOps</li>
+ *     <li>{@code CrossFormatBenchmark} - Format conversion performance</li>
+ * </ul>
+ *
+ * <h2>Configuration</h2>
+ * <ul>
+ *     <li>Warmup: 5 iterations</li>
+ *     <li>Measurement: 10 iterations</li>
+ *     <li>Forks: 2</li>
+ *     <li>JVM heap: 2 GB min/max</li>
+ * </ul>
+ *
+ * <p>Use this method when evaluating which DynamicOps implementation
+ * to use for a specific use case, or when optimizing format handling.
*
* @throws RunnerException if benchmark execution fails
+ * @see #runCoreBenchmarks()
+ * @see #runAllBenchmarks()
*/
public static void runFormatBenchmarks() throws RunnerException {
final Options options = new OptionsBuilder()
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
index a55b729..56405aa 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -49,10 +49,95 @@
/**
* JMH benchmark for collection codec encode/decode performance.
*
- * <p>Measures the performance of encoding and decoding lists of various sizes
- * using the {@link Codecs#list(Codec)} API.
+ * <p>Measures the performance of list codec operations with parameterized collection
+ * sizes. These benchmarks reveal how codec performance scales with data volume and
+ * help identify potential bottlenecks in collection traversal and element processing.
+ *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>String List Benchmarks</h3>
+ * <p>Measure {@code List<String>} codec operations:
+ *
+ * <ul>
+ *     <li>{@link #encodeStringList} - Encode string list to JSON array</li>
+ *     <li>{@link #decodeStringList} - Decode JSON array to string list</li>
+ *     <li>{@link #roundTripStringListDirect} - Complete round-trip with direct extraction</li>
+ *     <li>{@link #roundTripStringListFunctional} - Complete round-trip using functional API</li>
+ * </ul>
+ *
+ * <h3>Integer List Benchmarks</h3>
+ * <p>Measure {@code List<Integer>} codec operations:
+ *
+ * <ul>
+ *     <li>{@link #encodeIntList} - Encode integer list to JSON array</li>
+ *     <li>{@link #decodeIntList} - Decode JSON array to integer list</li>
+ *     <li>{@link #roundTripIntListDirect} - Complete round-trip with direct extraction</li>
+ *     <li>{@link #roundTripIntListFunctional} - Complete round-trip using functional API</li>
+ * </ul>
+ *
+ * <h2>Parameters</h2>
+ * <table>
+ *     <tr><th>Parameter</th><th>Values</th><th>Description</th></tr>
+ *     <tr><td>listSize</td><td>10, 100, 1000</td><td>Number of elements in the test list</td></tr>
+ * </table>
+ *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *     <tr><th>Setting</th><th>Value</th></tr>
+ *     <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *     <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *     <tr><td>Forks</td><td>2 (for JIT variance mitigation)</td></tr>
+ *     <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *     <tr><td>Time Unit</td><td>Microseconds (appropriate for collection operations)</td></tr>
+ * </table>
+ *
+ * <h2>Test Data Generation</h2>
+ * <table>
+ *     <tr><th>Collection</th><th>Element Pattern</th><th>Example (size=3)</th></tr>
+ *     <tr><td>String List</td><td>{@code "item-" + index}</td><td>["item-0", "item-1", "item-2"]</td></tr>
+ *     <tr><td>Integer List</td><td>{@code index}</td><td>[0, 1, 2]</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ * <ul>
+ *     <li><b>Linear scaling:</b> Expected behavior where time scales proportionally with list size.
+ *     If 100 elements takes 10x longer than 10 elements, scaling is linear.</li>
+ *     <li><b>Sub-linear scaling:</b> Better than expected, may indicate JIT optimizations
+ *     or efficient batch processing.</li>
+ *     <li><b>Super-linear scaling:</b> Performance degrades faster than list size grows.
+ *     May indicate memory pressure, GC overhead, or algorithmic inefficiency.</li>
+ *     <li><b>String vs Integer:</b> String lists typically have higher overhead due to
+ *     object allocation and potential string interning effects.</li>
+ *     <li><b>Direct vs Functional:</b> Functional API (using {@code flatMap}) may show
+ *     slight overhead from lambda creation and DataResult chaining.</li>
+ * </ul>
+ *
+ * <h2>Usage</h2>
+ * <pre>{@code
+ * # Run all collection codec benchmarks
+ * java -jar benchmarks.jar CollectionCodecBenchmark
+ *
+ * # Run with specific list size
+ * java -jar benchmarks.jar CollectionCodecBenchmark -p listSize=1000
+ *
+ * # Run only string list benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.*String.*"
+ *
+ * # Run only encode benchmarks
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.encode.*"
+ *
+ * # Compare direct vs functional round-trip
+ * java -jar benchmarks.jar "CollectionCodecBenchmark.roundTrip.*"
+ *
+ * # Quick validation run
+ * java -jar benchmarks.jar CollectionCodecBenchmark -wi 1 -i 1 -f 1
+ *
+ * # Generate JSON report for analysis
+ * java -jar benchmarks.jar CollectionCodecBenchmark -rf json -rff collection_results.json
+ * }</pre>
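+ *
+ * <p>The two round-trip styles compared above, sketched with the same APIs this benchmark uses:
+ * <pre>{@code
+ * Codec<List<String>> codec = Codecs.list(Codecs.STRING);
+ *
+ * // Direct style: unwrap each DataResult immediately
+ * JsonElement json = codec.encodeStart(GsonOps.INSTANCE, List.of("a", "b")).result().orElseThrow();
+ * Pair<List<String>, JsonElement> direct = codec.decode(GsonOps.INSTANCE, json).result().orElseThrow();
+ *
+ * // Functional style: chain through DataResult#flatMap without unwrapping
+ * DataResult<Pair<List<String>, JsonElement>> functional =
+ *         codec.encodeStart(GsonOps.INSTANCE, List.of("a", "b"))
+ *                 .flatMap(j -> codec.decode(GsonOps.INSTANCE, j));
+ * }</pre>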
*
* @author Erik Pförtner
+ * @see PrimitiveCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs#list(Codec)
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -63,24 +148,89 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class CollectionCodecBenchmark {
+ /**
+ * The number of elements in test lists, injected by JMH.
+ *
+ *
This parameter controls the size of both string and integer lists.
+ * Different sizes reveal scaling characteristics of the list codec:
+ *
+ *
10: Small list baseline, minimal memory/GC impact
+ *
100: Medium list, typical real-world collection size
+ *
1000: Large list stress test, reveals scaling behavior
+ *
+ */
@Param({"10", "100", "1000"})
private int listSize;
+ /**
+ * The DynamicOps implementation used for all codec operations.
+ *
+ *
GsonOps is used as the reference JSON implementation for benchmarks.
Creates list codecs by composing primitive codecs with {@link Codecs#list(Codec)}
+ *
Populates test lists with {@link #listSize} elements each
+ *
Pre-encodes both lists to JSON for decode benchmark isolation
+ *
+ *
+ *
Using {@link ArrayList} with pre-sized capacity avoids resizing overhead
+ * during population.
+ */
@Setup(Level.Trial)
public void setup() {
+ this.ops = GsonOps.INSTANCE;
+
this.stringListCodec = Codecs.list(Codecs.STRING);
this.intListCodec = Codecs.list(Codecs.INT);
- // Generate test data
this.stringList = new ArrayList<>(this.listSize);
this.intList = new ArrayList<>(this.listSize);
@@ -89,92 +239,168 @@ public void setup() {
this.intList.add(i);
}
- // Pre-encode for decode benchmarks
- this.encodedStringList = this.stringListCodec.encodeStart(GsonOps.INSTANCE, this.stringList)
+ this.encodedStringList = this.stringListCodec.encodeStart(this.ops, this.stringList)
.result().orElseThrow();
- this.encodedIntList = this.intListCodec.encodeStart(GsonOps.INSTANCE, this.intList)
+ this.encodedIntList = this.intListCodec.encodeStart(this.ops, this.intList)
.result().orElseThrow();
}
- // ==================== String List ====================
+ // ==================== String List Benchmarks ====================
/**
- * Benchmarks encoding a list of strings.
+ * Benchmarks string list encoding to JSON array.
+ *
+ *
Measures the performance of converting a {@code List} to a JSON
+ * array element. Each string element is individually encoded and added to the
+ * resulting array.
+ *
+ *
Performance factors:
+ *
+ *
List iteration overhead
+ *
Per-element string encoding cost
+ *
JSON array construction and element addition
+ *
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void encodeStringList(final Blackhole blackhole) {
- final DataResult<JsonElement> result = this.stringListCodec.encodeStart(
- GsonOps.INSTANCE, this.stringList);
+ final DataResult<JsonElement> result = this.stringListCodec.encodeStart(this.ops, this.stringList);
blackhole.consume(result);
}
/**
- * Benchmarks decoding a list of strings.
+ * Benchmarks string list decoding from JSON array.
+ *
+ *
Measures the performance of extracting a {@code List} from a
+ * pre-encoded JSON array. Each array element is decoded to a string and
+ * collected into the result list.
+ *
+ *
Performance factors:
+ *
+ *
JSON array traversal
+ *
Per-element string extraction
+ *
Result list construction and population
+ *
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void decodeStringList(final Blackhole blackhole) {
- final DataResult<Pair<List<String>, JsonElement>> result = this.stringListCodec.decode(
- GsonOps.INSTANCE, this.encodedStringList);
+ final DataResult<Pair<List<String>, JsonElement>> result = this.stringListCodec.decode(this.ops, this.encodedStringList);
blackhole.consume(result);
}
- // ==================== Integer List ====================
+ // ==================== Integer List Benchmarks ====================
/**
- * Benchmarks encoding a list of integers.
+ * Benchmarks integer list encoding to JSON array.
+ *
+ *
Measures the performance of converting a {@code List} to a JSON
+ * array element. Integer encoding is typically faster than string encoding
+ * due to simpler value representation.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void encodeIntList(final Blackhole blackhole) {
- final DataResult<JsonElement> result = this.intListCodec.encodeStart(
- GsonOps.INSTANCE, this.intList);
+ final DataResult<JsonElement> result = this.intListCodec.encodeStart(this.ops, this.intList);
blackhole.consume(result);
}
/**
- * Benchmarks decoding a list of integers.
+ * Benchmarks integer list decoding from JSON array.
+ *
+ *
Measures the performance of extracting a {@code List} from a
+ * pre-encoded JSON array. Integer decoding involves numeric parsing from
+ * JSON number elements.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void decodeIntList(final Blackhole blackhole) {
- final DataResult<Pair<List<Integer>, JsonElement>> result = this.intListCodec.decode(
- GsonOps.INSTANCE, this.encodedIntList);
+ final DataResult<Pair<List<Integer>, JsonElement>> result = this.intListCodec.decode(this.ops, this.encodedIntList);
blackhole.consume(result);
}
- // ==================== Round Trip ====================
+ // ==================== Round-Trip Benchmarks (Direct Style) ====================
+
+ /**
+ * Benchmarks complete string list round-trip with direct result extraction.
+ *
+ *
Measures the combined performance of encoding a {@code List} to JSON
+ * and immediately decoding it back. Uses {@code result().orElseThrow()} for
+ * direct value extraction, representing typical imperative usage patterns.
+ *
+ *
This benchmark is useful for scenarios where data is temporarily serialized
+ * (e.g., caching, message passing) and immediately deserialized.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripStringListDirect(final Blackhole blackhole) {
+ final JsonElement json = this.stringListCodec.encodeStart(this.ops, this.stringList)
+ .result().orElseThrow();
+ final Pair<List<String>, JsonElement> decoded = this.stringListCodec.decode(this.ops, json)
+ .result().orElseThrow();
+ blackhole.consume(decoded);
+ }
+
+ /**
+ * Benchmarks complete integer list round-trip with direct result extraction.
+ *
+ *
Measures the combined performance of encoding a {@code List} to JSON
+ * and immediately decoding it back using direct value extraction.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void roundTripIntListDirect(final Blackhole blackhole) {
+ final JsonElement json = this.intListCodec.encodeStart(this.ops, this.intList)
+ .result().orElseThrow();
+ final Pair<List<Integer>, JsonElement> decoded = this.intListCodec.decode(this.ops, json)
+ .result().orElseThrow();
+ blackhole.consume(decoded);
+ }
+
+ // ==================== Round-Trip Benchmarks (Functional Style) ====================
/**
- * Benchmarks round-trip encoding and decoding of a string list.
+ * Benchmarks complete string list round-trip using functional API.
+ *
+ *
Measures the combined performance of encoding and decoding using
+ * {@link DataResult#flatMap} for monadic composition. This represents
+ * the functional programming style where operations are chained without
+ * explicit result unwrapping.
+ *
+ *
Comparing with {@link #roundTripStringListDirect} reveals the overhead
+ * (if any) of the functional API approach versus direct extraction.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void roundTripStringList(final Blackhole blackhole) {
- final DataResult<JsonElement> encoded = this.stringListCodec.encodeStart(
- GsonOps.INSTANCE, this.stringList);
+ public void roundTripStringListFunctional(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = this.stringListCodec.encodeStart(this.ops, this.stringList);
final DataResult<Pair<List<String>, JsonElement>> decoded = encoded.flatMap(
- json -> this.stringListCodec.decode(GsonOps.INSTANCE, json));
+ json -> this.stringListCodec.decode(this.ops, json)
+ );
blackhole.consume(decoded);
}
/**
- * Benchmarks round-trip encoding and decoding of an integer list.
+ * Benchmarks complete integer list round-trip using functional API.
+ *
+ *
Measures the combined performance of encoding and decoding using
+ * monadic composition via {@link DataResult#flatMap}.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void roundTripIntList(final Blackhole blackhole) {
- final DataResult<JsonElement> encoded = this.intListCodec.encodeStart(
- GsonOps.INSTANCE, this.intList);
+ public void roundTripIntListFunctional(final Blackhole blackhole) {
+ final DataResult<JsonElement> encoded = this.intListCodec.encodeStart(this.ops, this.intList);
final DataResult<Pair<List<Integer>, JsonElement>> decoded = encoded.flatMap(
- json -> this.intListCodec.decode(GsonOps.INSTANCE, json));
+ json -> this.intListCodec.decode(this.ops, json)
+ );
blackhole.consume(decoded);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
index ad44bd4..7e9c8da 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -43,12 +43,102 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark for primitive codec encode/decode performance.
+ * JMH benchmark for primitive type codec encode/decode performance.
*
- * <p>Measures the performance of encoding and decoding primitive types
- * using the {@link Codecs} API.
+ * <p>Measures the baseline performance of the fundamental codec operations for
+ * primitive Java types. These benchmarks establish the lower bound for codec
+ * performance and help identify overhead introduced by more complex codec
+ * compositions.
+ *
+ * <h2>Benchmark Categories</h2>
+ *
+ * <h3>Encode Benchmarks</h3>
+ * <p>Measure Java value to JSON element conversion:
+ *
+ * <ul>
+ *     <li>{@link #encodeBool} - Boolean encoding</li>
+ *     <li>{@link #encodeInt} - Integer encoding</li>
+ *     <li>{@link #encodeLong} - Long encoding</li>
+ *     <li>{@link #encodeFloat} - Float encoding</li>
+ *     <li>{@link #encodeDouble} - Double encoding</li>
+ *     <li>{@link #encodeString} - String encoding</li>
+ * </ul>
+ *
+ * <h3>Decode Benchmarks</h3>
+ * <p>Measure JSON element to Java value conversion:
+ *
+ * <ul>
+ *     <li>{@link #decodeBool} - Boolean decoding</li>
+ *     <li>{@link #decodeInt} - Integer decoding</li>
+ *     <li>{@link #decodeLong} - Long decoding</li>
+ *     <li>{@link #decodeFloat} - Float decoding</li>
+ *     <li>{@link #decodeDouble} - Double decoding</li>
+ *     <li>{@link #decodeString} - String decoding</li>
+ * </ul>
+ *
+ * <h3>Round-Trip Benchmarks</h3>
+ * <p>Measure complete encode-then-decode cycles:
+ *
+ * <ul>
+ *     <li>{@link #roundTripIntDirect} - Integer round-trip with direct result extraction</li>
+ *     <li>{@link #roundTripStringDirect} - String round-trip with direct result extraction</li>
+ * </ul>
+ *
+ * <h2>Benchmark Configuration</h2>
+ * <table>
+ *     <tr><th>Setting</th><th>Value</th></tr>
+ *     <tr><td>Warmup</td><td>5 iterations, 1 second each</td></tr>
+ *     <tr><td>Measurement</td><td>10 iterations, 1 second each</td></tr>
+ *     <tr><td>Forks</td><td>2 (for JIT variance mitigation)</td></tr>
+ *     <tr><td>JVM Heap</td><td>2 GB min/max</td></tr>
+ *     <tr><td>Time Unit</td><td>Nanoseconds (for fine-grained primitive ops)</td></tr>
+ * </table>
+ *
+ * <h2>Test Values</h2>
+ * <table>
+ *     <tr><th>Type</th><th>Value</th><th>Notes</th></tr>
+ *     <tr><td>boolean</td><td>{@code true}</td><td>Single bit representation</td></tr>
+ *     <tr><td>int</td><td>{@code 42}</td><td>Small positive integer</td></tr>
+ *     <tr><td>long</td><td>{@code 123456789L}</td><td>Representative multi-digit value (long-specific handling)</td></tr>
+ *     <tr><td>float</td><td>{@code 3.14159f}</td><td>Pi approximation (tests decimal handling)</td></tr>
+ *     <tr><td>double</td><td>{@code 2.718281828}</td><td>Euler's number (tests precision)</td></tr>
+ *     <tr><td>String</td><td>{@code "benchmark-test-string"}</td><td>21-character ASCII string</td></tr>
+ * </table>
+ *
+ * <h2>Interpreting Results</h2>
+ * <ul>
+ *     <li><b>Encode vs Decode:</b> Encoding typically allocates new JSON elements; decoding
+ *     extracts values from existing elements. Similar performance is expected.</li>
+ *     <li><b>Numeric types:</b> All numeric types should have similar performance as they
+ *     map directly to JSON number primitives.</li>
+ *     <li><b>String codec:</b> May show slightly different characteristics due to string
+ *     interning and character encoding considerations.</li>
+ *     <li><b>Round-trip overhead:</b> Should be approximately encode + decode time plus
+ *     minimal DataResult unwrapping overhead.</li>
+ * </ul>
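+ *
+ * <p>For orientation, the operation pair being timed is essentially (sketch):
+ * <pre>{@code
+ * JsonElement json = Codecs.INT.encodeStart(GsonOps.INSTANCE, 42).result().orElseThrow();
+ * Pair<Integer, JsonElement> value = Codecs.INT.decode(GsonOps.INSTANCE, json).result().orElseThrow();
+ * }</pre>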
*
* @author Erik Pförtner
+ * @see CollectionCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -59,131 +149,319 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class PrimitiveCodecBenchmark {
- // Test values
+ /**
+ * Test boolean value for encoding benchmarks.
+ */
private static final boolean TEST_BOOL = true;
+
+ /**
+ * Test integer value for encoding benchmarks.
+ *
+ *
A small positive integer that fits in a single JSON number token.
+ */
private static final int TEST_INT = 42;
+
+ /**
+ * Test long value for encoding benchmarks.
+ *
+ * <p>A representative multi-digit value used to exercise long-specific handling.
+ */
private static final long TEST_LONG = 123456789L;
+
+ /**
+ * Test float value for encoding benchmarks.
+ *
+ *
Pi approximation to test decimal point handling and precision.
+ */
private static final float TEST_FLOAT = 3.14159f;
+
+ /**
+ * Test double value for encoding benchmarks.
+ *
+ *
Euler's number with extended precision to test double encoding accuracy.
+ */
private static final double TEST_DOUBLE = 2.718281828;
+
+ /**
+ * Test string value for encoding benchmarks.
+ *
+ *
A 21-character ASCII string representing typical field values.
+ */
private static final String TEST_STRING = "benchmark-test-string";
- // Pre-encoded values for decode benchmarks
+ /**
+ * The DynamicOps implementation used for all codec operations.
+ *
+ *
GsonOps is used as the reference implementation for JSON format benchmarks.
+ */
+ private GsonOps ops;
+
+ /**
+ * Pre-encoded boolean JSON element for decode benchmarks.
+ */
private JsonElement encodedBool;
+
+ /**
+ * Pre-encoded integer JSON element for decode benchmarks.
+ */
private JsonElement encodedInt;
+
+ /**
+ * Pre-encoded long JSON element for decode benchmarks.
+ */
private JsonElement encodedLong;
+
+ /**
+ * Pre-encoded float JSON element for decode benchmarks.
+ */
private JsonElement encodedFloat;
+
+ /**
+ * Pre-encoded double JSON element for decode benchmarks.
+ */
private JsonElement encodedDouble;
+
+ /**
+ * Pre-encoded string JSON element for decode benchmarks.
+ */
private JsonElement encodedString;
+ /**
+ * Initializes pre-encoded JSON elements for decode benchmarks.
+ *
+ *
Pre-encoding ensures decode benchmarks measure only decoding performance
+ * without encoding overhead. All test values are encoded once at trial start.
Measures the performance of converting a Java {@code boolean} to a
+ * JSON boolean element via {@link Codecs#BOOL}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeBool(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.BOOL.encodeStart(GsonOps.INSTANCE, TEST_BOOL);
+ final DataResult<JsonElement> result = Codecs.BOOL.encodeStart(this.ops, TEST_BOOL);
blackhole.consume(result);
}
+ /**
+ * Benchmarks boolean value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code Boolean} from a
+ * pre-encoded JSON boolean element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeBool(final Blackhole blackhole) {
- final DataResult<Pair<Boolean, JsonElement>> result = Codecs.BOOL.decode(GsonOps.INSTANCE, this.encodedBool);
+ final DataResult<Pair<Boolean, JsonElement>> result = Codecs.BOOL.decode(this.ops, this.encodedBool);
blackhole.consume(result);
}
- // ==================== Integer ====================
+ // ==================== Integer Benchmarks ====================
+ /**
+ * Benchmarks integer value encoding to JSON.
+ *
+ *
Measures the performance of converting a Java {@code int} to a
+ * JSON number element via {@link Codecs#INT}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeInt(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.INT.encodeStart(GsonOps.INSTANCE, TEST_INT);
+ final DataResult<JsonElement> result = Codecs.INT.encodeStart(this.ops, TEST_INT);
blackhole.consume(result);
}
+ /**
+ * Benchmarks integer value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code Integer} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeInt(final Blackhole blackhole) {
- final DataResult<Pair<Integer, JsonElement>> result = Codecs.INT.decode(GsonOps.INSTANCE, this.encodedInt);
+ final DataResult<Pair<Integer, JsonElement>> result = Codecs.INT.decode(this.ops, this.encodedInt);
blackhole.consume(result);
}
- // ==================== Long ====================
+ // ==================== Long Benchmarks ====================
+ /**
+ * Benchmarks long value encoding to JSON.
+ *
+ *
Measures the performance of converting a Java {@code long} to a
+ * JSON number element via {@link Codecs#LONG}.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeLong(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.LONG.encodeStart(GsonOps.INSTANCE, TEST_LONG);
+ final DataResult<JsonElement> result = Codecs.LONG.encodeStart(this.ops, TEST_LONG);
blackhole.consume(result);
}
+ /**
+ * Benchmarks long value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code Long} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeLong(final Blackhole blackhole) {
- final DataResult<Pair<Long, JsonElement>> result = Codecs.LONG.decode(GsonOps.INSTANCE, this.encodedLong);
+ final DataResult<Pair<Long, JsonElement>> result = Codecs.LONG.decode(this.ops, this.encodedLong);
blackhole.consume(result);
}
- // ==================== Float ====================
+ // ==================== Float Benchmarks ====================
+ /**
+ * Benchmarks float value encoding to JSON.
+ *
+ *
Measures the performance of converting a Java {@code float} to a
+ * JSON number element via {@link Codecs#FLOAT}. Float encoding involves
+ * decimal representation which may differ from integer encoding.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeFloat(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.FLOAT.encodeStart(GsonOps.INSTANCE, TEST_FLOAT);
+ final DataResult<JsonElement> result = Codecs.FLOAT.encodeStart(this.ops, TEST_FLOAT);
blackhole.consume(result);
}
+ /**
+ * Benchmarks float value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code Float} from a
+ * pre-encoded JSON number element. Decoding involves parsing the decimal
+ * representation back to IEEE 754 single-precision format.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeFloat(final Blackhole blackhole) {
- final DataResult<Pair<Float, JsonElement>> result = Codecs.FLOAT.decode(GsonOps.INSTANCE, this.encodedFloat);
+ final DataResult<Pair<Float, JsonElement>> result = Codecs.FLOAT.decode(this.ops, this.encodedFloat);
blackhole.consume(result);
}
- // ==================== Double ====================
+ // ==================== Double Benchmarks ====================
+ /**
+ * Benchmarks double value encoding to JSON.
+ *
+ *
Measures the performance of converting a Java {@code double} to a
+ * JSON number element via {@link Codecs#DOUBLE}. Double encoding preserves
+ * higher precision than float but uses similar mechanisms.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeDouble(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.DOUBLE.encodeStart(GsonOps.INSTANCE, TEST_DOUBLE);
+ final DataResult<JsonElement> result = Codecs.DOUBLE.encodeStart(this.ops, TEST_DOUBLE);
blackhole.consume(result);
}
+ /**
+ * Benchmarks double value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code Double} from a
+ * pre-encoded JSON number element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeDouble(final Blackhole blackhole) {
- final DataResult<Pair<Double, JsonElement>> result = Codecs.DOUBLE.decode(GsonOps.INSTANCE, this.encodedDouble);
+ final DataResult<Pair<Double, JsonElement>> result = Codecs.DOUBLE.decode(this.ops, this.encodedDouble);
blackhole.consume(result);
}
- // ==================== String ====================
+ // ==================== String Benchmarks ====================
+ /**
+ * Benchmarks string value encoding to JSON.
+ *
+ *
Measures the performance of converting a Java {@code String} to a
+ * JSON string element via {@link Codecs#STRING}. String encoding may involve
+ * escape sequence handling for special characters.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void encodeString(final Blackhole blackhole) {
- final DataResult<JsonElement> result = Codecs.STRING.encodeStart(GsonOps.INSTANCE, TEST_STRING);
+ final DataResult<JsonElement> result = Codecs.STRING.encodeStart(this.ops, TEST_STRING);
blackhole.consume(result);
}
+ /**
+ * Benchmarks string value decoding from JSON.
+ *
+ *
Measures the performance of extracting a Java {@code String} from a
+ * pre-encoded JSON string element.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
public void decodeString(final Blackhole blackhole) {
- final DataResult<Pair<String, JsonElement>> result = Codecs.STRING.decode(GsonOps.INSTANCE, this.encodedString);
+ final DataResult<Pair<String, JsonElement>> result = Codecs.STRING.decode(this.ops, this.encodedString);
blackhole.consume(result);
}
- // ==================== Round Trip ====================
+ // ==================== Round-Trip Benchmarks ====================
+ /**
+ * Benchmarks complete integer round-trip (encode then decode).
+ *
+ *
Measures the combined performance of encoding a Java {@code int} to JSON
+ * and immediately decoding it back. Uses direct result extraction via
+ * {@code result().orElseThrow()} to measure the typical non-functional usage pattern.
+ *
+ *
Round-trip performance is important for scenarios where data is temporarily
+ * serialized (e.g., caching, IPC) and immediately deserialized.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
- public void roundTripInt(final Blackhole blackhole) {
- final DataResult<JsonElement> encoded = Codecs.INT.encodeStart(GsonOps.INSTANCE, TEST_INT);
- final DataResult<Pair<Integer, JsonElement>> decoded = encoded.flatMap(
- json -> Codecs.INT.decode(GsonOps.INSTANCE, json));
+ public void roundTripIntDirect(final Blackhole blackhole) {
+ final JsonElement json = Codecs.INT.encodeStart(this.ops, TEST_INT).result().orElseThrow();
+ final Pair<Integer, JsonElement> decoded = Codecs.INT.decode(this.ops, json).result().orElseThrow();
blackhole.consume(decoded);
}
+ /**
+ * Benchmarks complete string round-trip (encode then decode).
+ *
+ *
Measures the combined performance of encoding a Java {@code String} to JSON
+ * and immediately decoding it back. String round-trips may involve additional
+ * overhead from string object creation compared to primitive numeric types.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
@Benchmark
- public void roundTripString(final Blackhole blackhole) {
- final DataResult<JsonElement> encoded = Codecs.STRING.encodeStart(GsonOps.INSTANCE, TEST_STRING);
- final DataResult<Pair<String, JsonElement>> decoded = encoded.flatMap(
- json -> Codecs.STRING.decode(GsonOps.INSTANCE, json));
+ public void roundTripStringDirect(final Blackhole blackhole) {
+ final JsonElement json = Codecs.STRING.encodeStart(this.ops, TEST_STRING).result().orElseThrow();
+ final Pair<String, JsonElement> decoded = Codecs.STRING.decode(this.ops, json).result().orElseThrow();
blackhole.consume(decoded);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java
new file mode 100644
index 0000000..5720cfc
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/package-info.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2025 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Codec-focused JMH benchmarks for the Aether DataFixers framework.
+ *
+ *
This package contains benchmarks that measure the performance of codec operations,
+ * including encoding (Java objects to serialized format) and decoding (serialized format
+ * to Java objects). These benchmarks establish baseline performance for the codec system
+ * and help identify bottlenecks in serialization pipelines.
Scaling with collection size, functional vs direct API overhead
+ *
+ *
+ *
+ *
Why Codec Benchmarks?
+ *
Codecs are fundamental to the DataFixer system, transforming data between typed
+ * Java objects and format-agnostic {@link de.splatgames.aether.datafixers.api.dynamic.Dynamic}
+ * representations. Understanding codec performance is essential for:
+ *
+ *
Baseline establishment: Primitive codecs set the lower bound for all
+ * codec operations; complex codecs compose these primitives
+ *
Bottleneck identification: Comparing encode vs decode reveals which
+ * direction is more expensive for a given type
+ *
Scaling analysis: Collection benchmarks show how performance changes
+ * with data volume
+ *
API comparison: Direct extraction vs functional composition may have
+ * different performance characteristics
{@link de.splatgames.aether.datafixers.benchmarks.concurrent concurrent} -
+ * Codec thread-safety is assumed; concurrent benchmarks validate this assumption
+ *
+ *
+ *
Supported Serialization Formats
+ *
These benchmarks use {@link de.splatgames.aether.datafixers.codec.json.gson.GsonOps}
+ * as the reference DynamicOps implementation. The codec system supports multiple formats:
+ *
+ *
JSON: GsonOps, JacksonJsonOps
+ *
YAML: SnakeYamlOps, JacksonYamlOps
+ *
TOML: JacksonTomlOps
+ *
XML: JacksonXmlOps
+ *
+ *
Future benchmarks may compare performance across different DynamicOps implementations.
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.codec.PrimitiveCodecBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.codec.CollectionCodecBenchmark
+ * @see de.splatgames.aether.datafixers.api.codec.Codec
+ * @see de.splatgames.aether.datafixers.api.codec.Codecs
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.codec;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
index e60fd60..c74d288 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -114,7 +114,8 @@ public class SingleFixBenchmark {
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void singleRenameFix(final SizedState s, final Blackhole blackhole) {
+ public void singleRenameFix(final SizedState s,
+ final Blackhole blackhole) {
blackhole.consume(s.fixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
s.input,
@@ -133,7 +134,8 @@ public void singleRenameFix(final SizedState s, final Blackhole blackhole) {
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
- public void identityFix(final SizedState s, final Blackhole blackhole) {
+ public void identityFix(final SizedState s,
+ final Blackhole blackhole) {
blackhole.consume(s.identityFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
s.input,
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
index 0cd6961..ac0bce9 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -24,8 +24,6 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.google.gson.JsonElement;
-import de.splatgames.aether.datafixers.api.dynamic.Dynamic;
-import de.splatgames.aether.datafixers.api.dynamic.DynamicOps;
import de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator;
import de.splatgames.aether.datafixers.benchmarks.util.PayloadSize;
import de.splatgames.aether.datafixers.codec.json.gson.GsonOps;
@@ -49,12 +47,124 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark for cross-format conversion performance.
+ * JMH benchmark for cross-format conversion performance between DynamicOps implementations.
*
- *
Measures the overhead of converting data between different
- * DynamicOps implementations using {@link DynamicOps#convertTo}.
+ * <p>This benchmark measures the overhead of converting data between different
+ * serialization formats using the {@code DynamicOps.convertTo()} mechanism. Cross-format
+ * conversion is essential when integrating systems that use different data formats
+ * or when migrating data through format-agnostic DataFixers.
+ *
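+ * <p>A single conversion, sketched with the ops used below (target ops first, then source
+ * ops and source value):
+ * <pre>{@code
+ * JsonNode node = JacksonJsonOps.INSTANCE.convertTo(GsonOps.INSTANCE, gsonElement);
+ * }</pre>
+ *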
+ * <h2>Conversion Pairs Benchmarked</h2>
+ *
+ * <h3>JSON Library Conversions</h3>
+ *
+ * <ul>
+ *     <li>{@link #gsonToJackson} - Gson JsonElement → Jackson JsonNode</li>
+ *     <li>{@link #jacksonToGson} - Jackson JsonNode → Gson JsonElement</li>
+ * </ul>
Measures the overhead of converting between two JSON libraries.
+ * Both represent JSON but use different internal tree structures.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonToJackson(final Blackhole blackhole) {
- final JsonNode result = JacksonJsonOps.INSTANCE.convertTo(
- GsonOps.INSTANCE, this.gsonData.value());
+ final JsonNode result = this.jacksonJsonOps.convertTo(this.gsonOps, this.gsonRoot);
blackhole.consume(result);
}
/**
- * Benchmarks converting from Jackson JSON to Gson.
+ * Benchmarks conversion from Jackson JsonNode to Gson JsonElement.
+ *
+ *
Measures the reverse conversion from Jackson to Gson representation.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonToGson(final Blackhole blackhole) {
- final JsonElement result = GsonOps.INSTANCE.convertTo(
- JacksonJsonOps.INSTANCE, this.jacksonData.value());
+ final JsonElement result = this.gsonOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot);
blackhole.consume(result);
}
- // ==================== Gson <-> SnakeYAML ====================
+ // ==================== Gson <-> SnakeYAML Conversions ====================
/**
- * Benchmarks converting from Gson to SnakeYAML.
+ * Benchmarks conversion from Gson JsonElement to SnakeYAML native types.
+ *
+ *
Measures cross-ecosystem conversion from JSON library to YAML library.
+ * SnakeYAML uses native Java Maps and Lists internally.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonToSnakeYaml(final Blackhole blackhole) {
- final Object result = SnakeYamlOps.INSTANCE.convertTo(
- GsonOps.INSTANCE, this.gsonData.value());
+ final Object result = this.snakeYamlOps.convertTo(this.gsonOps, this.gsonRoot);
blackhole.consume(result);
}
/**
- * Benchmarks converting from SnakeYAML to Gson.
+ * Benchmarks conversion from SnakeYAML native types to Gson JsonElement.
+ *
+ *
Measures cross-ecosystem conversion from YAML native types to JSON tree.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void snakeYamlToGson(final Blackhole blackhole) {
- final JsonElement result = GsonOps.INSTANCE.convertTo(
- SnakeYamlOps.INSTANCE, this.snakeYamlData.value());
+ final JsonElement result = this.gsonOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot);
blackhole.consume(result);
}
- // ==================== Jackson JSON <-> Jackson YAML ====================
+ // ==================== Jackson JSON <-> Jackson YAML Conversions ====================
/**
- * Benchmarks converting from Jackson JSON to Jackson YAML.
+ * Benchmarks conversion from Jackson JSON to Jackson YAML.
+ *
+ *
Measures conversion within the Jackson ecosystem. Both formats use
+ * JsonNode internally, potentially enabling optimizations.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonJsonToYaml(final Blackhole blackhole) {
- final JsonNode result = JacksonYamlOps.INSTANCE.convertTo(
- JacksonJsonOps.INSTANCE, this.jacksonData.value());
+ final JsonNode result = this.jacksonYamlOps.convertTo(this.jacksonJsonOps, this.jacksonJsonRoot);
blackhole.consume(result);
}
/**
- * Benchmarks converting from Jackson YAML to Jackson JSON.
+ * Benchmarks conversion from Jackson YAML to Jackson JSON.
+ *
+ *
Measures reverse conversion within the Jackson ecosystem.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonYamlToJson(final Blackhole blackhole) {
- final JsonNode result = JacksonJsonOps.INSTANCE.convertTo(
- JacksonYamlOps.INSTANCE, this.jacksonYamlData.value());
+ final JsonNode result = this.jacksonJsonOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot);
blackhole.consume(result);
}
- // ==================== SnakeYAML <-> Jackson YAML ====================
+ // ==================== SnakeYAML <-> Jackson YAML Conversions ====================
/**
- * Benchmarks converting from SnakeYAML to Jackson YAML.
+ * Benchmarks conversion from SnakeYAML native types to Jackson YAML JsonNode.
+ *
+ *
Measures conversion between two YAML libraries with different internal
+ * representations (native Java types vs JsonNode).
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void snakeYamlToJacksonYaml(final Blackhole blackhole) {
- final JsonNode result = JacksonYamlOps.INSTANCE.convertTo(
- SnakeYamlOps.INSTANCE, this.snakeYamlData.value());
+ final JsonNode result = this.jacksonYamlOps.convertTo(this.snakeYamlOps, this.snakeYamlRoot);
blackhole.consume(result);
}
/**
- * Benchmarks converting from Jackson YAML to SnakeYAML.
+ * Benchmarks conversion from Jackson YAML JsonNode to SnakeYAML native types.
+ *
+ *
Measures reverse conversion from JsonNode to native Java Maps/Lists.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonYamlToSnakeYaml(final Blackhole blackhole) {
- final Object result = SnakeYamlOps.INSTANCE.convertTo(
- JacksonYamlOps.INSTANCE, this.jacksonYamlData.value());
+ final Object result = this.snakeYamlOps.convertTo(this.jacksonYamlOps, this.jacksonYamlRoot);
blackhole.consume(result);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
index 5dcccb6..d0f64b2 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
@@ -49,12 +49,98 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark comparing Gson and Jackson JSON performance.
+ * JMH benchmark comparing JSON DynamicOps implementations: Gson vs Jackson.
*
- *
Measures DynamicOps operations and migration performance for both
- * JSON implementations.
+ *
This benchmark measures the performance of JSON-based operations using two
+ * different underlying libraries: Google Gson ({@link GsonOps}) and Jackson Databind
+ * ({@link JacksonJsonOps}). The results help determine which implementation is more
+ * suitable for specific use cases.
+ *
+ *
Benchmark Categories
+ *
+ *
Data Generation
+ *
Measure Dynamic object construction performance:
+ *
+ *
{@link #gsonGenerate} - Create Dynamic using GsonOps
+ *
{@link #jacksonGenerate} - Create Dynamic using JacksonJsonOps
+ *
+ *
+ *
Field Access
+ *
Measure field read operations on existing data:
+ *
+ *
{@link #gsonFieldRead} - Read field from Gson-backed Dynamic
+ *
{@link #jacksonFieldRead} - Read field from Jackson-backed Dynamic
+ *
+ *
+ *
Field Modification
+ *
Measure field write/set operations:
+ *
+ *
{@link #gsonFieldSet} - Set field on Gson-backed Dynamic
+ *
{@link #jacksonFieldSet} - Set field on Jackson-backed Dynamic
+ *
+ *
+ *
Migration
+ *
Measure DataFixer migration performance:
+ *
+ *
{@link #gsonMigration} - Apply fix to Gson-backed data
+ *
{@link #jacksonMigration} - Apply fix to Jackson-backed data
{@code
+ * # Run all JSON benchmarks
+ * java -jar benchmarks.jar JsonBenchmark
+ *
+ * # Compare only field access performance
+ * java -jar benchmarks.jar "JsonBenchmark.*FieldRead"
+ *
+ * # Run Gson-only benchmarks
+ * java -jar benchmarks.jar "JsonBenchmark.gson.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar JsonBenchmark -p payloadSize=LARGE
+ * }
*
* @author Erik Pförtner
+ * @see YamlBenchmark
+ * @see TomlXmlBenchmark
+ * @see CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.codec.json.gson.GsonOps
+ * @see de.splatgames.aether.datafixers.codec.json.jackson.JacksonJsonOps
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -65,131 +151,258 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class JsonBenchmark {
+ /**
+ * Field name used for read/write benchmarks.
+ *
+ *
References the first string field generated by {@link BenchmarkDataGenerator}.
+ */
+ private static final String FIELD_NAME = "stringField0";
+
+ /**
+ * Payload size parameter controlling test data complexity.
+ *
+ *
Injected by JMH to run benchmarks across different data sizes.
+ */
@Param({"SMALL", "MEDIUM", "LARGE"})
private PayloadSize payloadSize;
+ /**
+ * Google Gson DynamicOps implementation.
+ */
+ private GsonOps gsonOps;
+
+ /**
+ * Jackson Databind DynamicOps implementation.
+ */
+ private JacksonJsonOps jacksonOps;
+
+ /**
+ * Pre-generated test data using Gson.
+ */
 private Dynamic<JsonElement> gsonData;
+
+ /**
+ * Pre-generated test data using Jackson.
+ */
 private Dynamic<JsonNode> jacksonData;
- private DataFixer fixer;
+
+ /**
+ * DataFixer for Gson-based migrations.
+ */
+ private DataFixer gsonFixer;
+
+ /**
+ * Optional DataFixer for Jackson-based migrations.
+ *
+ *
May be {@code null} if no dedicated Jackson fixer is configured.
+ * In that case, cross-format migration behavior is measured instead.
+ */
+ private DataFixer jacksonFixer;
+
+ /**
+ * Source version for migrations (v1).
+ */
private DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
private DataVersion toVersion;
+ /**
+ * Initializes DynamicOps instances, test data, and DataFixers.
+ *
+ *
Both Gson and Jackson data are pre-generated to isolate benchmark
+ * measurements from data creation overhead (except for generation benchmarks).
+ */
@Setup(Level.Trial)
public void setup() {
- this.gsonData = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize);
- this.jacksonData = BenchmarkDataGenerator.generate(JacksonJsonOps.INSTANCE, this.payloadSize);
- this.fixer = BenchmarkBootstrap.createSingleFixFixer();
+ this.gsonOps = GsonOps.INSTANCE;
+ this.jacksonOps = JacksonJsonOps.INSTANCE;
+
+ this.gsonData = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize);
+ this.jacksonData = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize);
+
+ this.gsonFixer = BenchmarkBootstrap.createSingleFixFixer();
+
+ // If you have a dedicated Jackson fixer, wire it here. Otherwise keep it null and measure cross-format explicitly.
+ // Example (if you add it later): this.jacksonFixer = BenchmarkBootstrap.createSingleFixFixerJackson();
+ this.jacksonFixer = null;
+
this.fromVersion = new DataVersion(1);
this.toVersion = new DataVersion(2);
}
- // ==================== Data Generation ====================
+ // ==================== Data Generation Benchmarks ====================
/**
- * Benchmarks Gson data generation.
+ * Benchmarks Dynamic object generation using GsonOps.
+ *
+ *
+ * Measures the time to create a complete test data structure using
+ * Gson as the underlying JSON representation.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- GsonOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.gsonOps, this.payloadSize);
blackhole.consume(data);
}
/**
- * Benchmarks Jackson JSON data generation.
+ * Benchmarks Dynamic object generation using JacksonJsonOps.
+ *
+ *
+ * Measures the time to create a complete test data structure using
+ * Jackson as the underlying JSON representation.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- JacksonJsonOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize);
blackhole.consume(data);
}
- // ==================== Field Access ====================
+ // ==================== Field Access Benchmarks ====================
/**
- * Benchmarks Gson field read access.
+ * Benchmarks field read access on Gson-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * Gson-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.gsonData.get("stringField0");
+ final Dynamic field = this.gsonData.get(FIELD_NAME);
blackhole.consume(field);
}
/**
- * Benchmarks Jackson field read access.
+ * Benchmarks field read access on Jackson-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * Jackson-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.jacksonData.get("stringField0");
+ final Dynamic field = this.jacksonData.get(FIELD_NAME);
blackhole.consume(field);
}
- // ==================== Field Modification ====================
+ // ==================== Field Modification Benchmarks ====================
/**
- * Benchmarks Gson field set operation.
+ * Benchmarks field set operation on Gson-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to a Gson-based Dynamic object.
+ * This operation typically creates a new Dynamic with the modified content.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonFieldSet(final Blackhole blackhole) {
final Dynamic result = this.gsonData.set(
- "newField", this.gsonData.createString("newValue"));
+ "newField",
+ this.gsonData.createString("newValue")
+ );
blackhole.consume(result);
}
/**
- * Benchmarks Jackson field set operation.
+ * Benchmarks field set operation on Jackson-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to a Jackson-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonFieldSet(final Blackhole blackhole) {
final Dynamic result = this.jacksonData.set(
- "newField", this.jacksonData.createString("newValue"));
+ "newField",
+ this.jacksonData.createString("newValue")
+ );
blackhole.consume(result);
}
- // ==================== Migration ====================
+ // ==================== Migration Benchmarks ====================
/**
- * Benchmarks migration with Gson DynamicOps.
+ * Benchmarks DataFixer migration on Gson-backed data.
+ *
+ *
+ * Measures the time to apply a single fix migration to Gson-based
+ * Dynamic data. This represents the typical migration scenario where
+ * both fixer and data use the same DynamicOps implementation.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void gsonMigration(final Blackhole blackhole) {
- final Dynamic result = this.fixer.update(
+ final Dynamic result = this.gsonFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
this.gsonData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks migration with Jackson DynamicOps.
+ * Benchmarks DataFixer migration on Jackson-backed data.
+ *
+ *
+ * If a dedicated Jackson fixer is available, measures native Jackson
+ * migration. Otherwise, falls back to cross-format migration using the
+ * Gson-based fixer with Jackson input data.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonMigration(final Blackhole blackhole) {
- // Note: Jackson migration uses Jackson-based data
- // The fixer is Gson-based, so this tests cross-format behavior
- final Dynamic result = this.fixer.update(
+ if (this.jacksonFixer == null) {
+ // No dedicated Jackson fixer available -> this would not be a fair "Jackson migration" benchmark.
+ // Measure the cross-format behavior explicitly instead.
+ final Dynamic result = this.gsonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.jacksonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ return;
+ }
+
+ final Dynamic result = this.jacksonFixer.update(
+ BenchmarkBootstrap.BENCHMARK_TYPE,
+ this.jacksonData,
+ this.fromVersion,
+ this.toVersion
+ );
+ blackhole.consume(result);
+ }
+
+ /**
+ * Benchmarks cross-format migration with Jackson input and Gson-based fixer.
+ *
+ *
+ * Measures the performance overhead when the fixer's DynamicOps differs
+ * from the input data's DynamicOps. This scenario is common when migrating
+ * data from various sources through a centralized fixer.
+ *
+ *
+ * Comparing this benchmark with {@link #gsonMigration} reveals the
+ * overhead of format conversion during migration.
+ *
+ * @param blackhole JMH blackhole to prevent dead code elimination
+ */
+ @Benchmark
+ public void crossFormatMigrationJacksonInput(final Blackhole blackhole) {
+ final Dynamic result = this.gsonFixer.update(
BenchmarkBootstrap.BENCHMARK_TYPE,
this.jacksonData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
index f618554..2dc134c 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
@@ -48,12 +48,100 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark for TOML and XML format performance.
+ * JMH benchmark for TOML and XML DynamicOps implementations via Jackson.
*
- *
- * Measures DynamicOps operations and migration performance for
- * Jackson TOML and XML implementations.
+ *
+ * This benchmark measures the performance of TOML and XML format operations
+ * using Jackson-based implementations ({@link JacksonTomlOps} and {@link JacksonXmlOps}).
+ * Both formats share Jackson's unified API, enabling direct performance comparison.
+ *
+ *
Benchmark Categories
+ *
+ *
Data Generation
+ *
Measure Dynamic object construction performance:
+ *
+ *
{@link #tomlGenerate} - Create Dynamic using JacksonTomlOps
+ *
{@link #xmlGenerate} - Create Dynamic using JacksonXmlOps
+ *
+ *
+ *
Field Access
+ *
Measure field read operations on existing data:
+ *
+ *
{@link #tomlFieldRead} - Read field from TOML-backed Dynamic
+ *
{@link #xmlFieldRead} - Read field from XML-backed Dynamic
+ *
+ *
+ *
Field Modification
+ *
Measure field write/set operations:
+ *
+ *
{@link #tomlFieldSet} - Set field on TOML-backed Dynamic
+ *
{@link #xmlFieldSet} - Set field on XML-backed Dynamic
+ *
+ *
+ *
Migration
+ *
Measure DataFixer migration performance:
+ *
+ *
{@link #tomlMigration} - Apply fix to TOML-backed data
+ *
{@link #xmlMigration} - Apply fix to XML-backed data
+ *
+ *
+ *
Implementations
+ *
+ *
Implementation
Library
Node Type
Use Case
+ *
+ *
{@link JacksonTomlOps}
+ *
Jackson Dataformat TOML
+ *
{@code JsonNode}
+ *
Configuration files, Rust ecosystem integration
+ *
+ *
+ *
{@link JacksonXmlOps}
+ *
Jackson Dataformat XML
+ *
{@code JsonNode}
+ *
Legacy systems, SOAP/REST APIs, document formats
+ *
+ *
+ *
+ *
Parameters
+ *
+ *
Parameter
Values
Description
+ *
payloadSize
SMALL, MEDIUM
Test data complexity (LARGE excluded for performance)
+ *
+ *
+ *
+ * Note: The LARGE payload size is excluded from this benchmark because
+ * TOML and XML serialization typically have higher overhead than JSON/YAML,
+ * making large payloads impractical for typical use cases.
+ * Measures the time to create a complete test data structure using
+ * Jackson's TOML dataformat module.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void tomlGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- JacksonTomlOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.tomlOps, this.payloadSize);
blackhole.consume(data);
}
/**
- * Benchmarks XML data generation.
+ * Benchmarks Dynamic object generation using JacksonXmlOps.
+ *
+ *
+ * Measures the time to create a complete test data structure using
+ * Jackson's XML dataformat module.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void xmlGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- JacksonXmlOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.xmlOps, this.payloadSize);
blackhole.consume(data);
}
- // ==================== Field Access ====================
+ // ==================== Field Access Benchmarks ====================
/**
- * Benchmarks TOML field read access.
+ * Benchmarks field read access on TOML-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * TOML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void tomlFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.tomlData.get("stringField0");
+ final Dynamic field = this.tomlData.get(FIELD_NAME);
blackhole.consume(field);
}
/**
- * Benchmarks XML field read access.
+ * Benchmarks field read access on XML-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * XML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void xmlFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.xmlData.get("stringField0");
+ final Dynamic field = this.xmlData.get(FIELD_NAME);
blackhole.consume(field);
}
- // ==================== Field Modification ====================
+ // ==================== Field Modification Benchmarks ====================
/**
- * Benchmarks TOML field set operation.
+ * Benchmarks field set operation on TOML-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to a TOML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void tomlFieldSet(final Blackhole blackhole) {
final Dynamic result = this.tomlData.set(
- "newField", this.tomlData.createString("newValue"));
+ "newField",
+ this.tomlData.createString("newValue")
+ );
blackhole.consume(result);
}
/**
- * Benchmarks XML field set operation.
+ * Benchmarks field set operation on XML-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to an XML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void xmlFieldSet(final Blackhole blackhole) {
final Dynamic result = this.xmlData.set(
- "newField", this.xmlData.createString("newValue"));
+ "newField",
+ this.xmlData.createString("newValue")
+ );
blackhole.consume(result);
}
- // ==================== Migration ====================
+ // ==================== Migration Benchmarks ====================
/**
- * Benchmarks migration with TOML DynamicOps.
+ * Benchmarks DataFixer migration on TOML-backed data.
+ *
+ *
+ * Measures the time to apply a single fix migration to TOML-based
+ * Dynamic data.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -171,12 +332,16 @@ public void tomlMigration(final Blackhole blackhole) {
BenchmarkBootstrap.BENCHMARK_TYPE,
this.tomlData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks migration with XML DynamicOps.
+ * Benchmarks DataFixer migration on XML-backed data.
+ *
+ *
+ * Measures the time to apply a single fix migration to XML-based
+ * Dynamic data.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -186,7 +351,8 @@ public void xmlMigration(final Blackhole blackhole) {
BenchmarkBootstrap.BENCHMARK_TYPE,
this.xmlData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
index c387455..c0f2862 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
@@ -48,12 +48,97 @@
import java.util.concurrent.TimeUnit;
/**
- * JMH benchmark comparing SnakeYAML and Jackson YAML performance.
+ * JMH benchmark comparing YAML DynamicOps implementations: SnakeYAML vs Jackson YAML.
*
- *
- * Measures DynamicOps operations and migration performance for both
- * YAML implementations.
+ *
+ * This benchmark measures the performance of YAML-based operations using two
+ * different underlying libraries: SnakeYAML ({@link SnakeYamlOps}) and Jackson YAML
+ * ({@link JacksonYamlOps}). YAML is commonly used for configuration files and
+ * human-readable data serialization.
+ *
+ *
Benchmark Categories
+ *
+ *
Data Generation
+ *
Measure Dynamic object construction performance:
+ *
+ *
{@link #snakeYamlGenerate} - Create Dynamic using SnakeYamlOps
+ *
{@link #jacksonYamlGenerate} - Create Dynamic using JacksonYamlOps
+ *
+ *
+ *
Field Access
+ *
Measure field read operations on existing data:
+ *
+ *
{@link #snakeYamlFieldRead} - Read field from SnakeYAML-backed Dynamic
+ *
{@link #jacksonYamlFieldRead} - Read field from Jackson YAML-backed Dynamic
+ *
+ *
+ *
Field Modification
+ *
Measure field write/set operations:
+ *
+ *
{@link #snakeYamlFieldSet} - Set field on SnakeYAML-backed Dynamic
+ *
{@link #jacksonYamlFieldSet} - Set field on Jackson YAML-backed Dynamic
+ *
+ *
+ *
Migration
+ *
Measure DataFixer migration performance:
+ *
+ *
{@link #snakeYamlMigration} - Apply fix to SnakeYAML-backed data
+ *
{@link #jacksonYamlMigration} - Apply fix to Jackson YAML-backed data
+ *
+ *
+ *
Implementations Compared
+ *
+ *
Implementation
Library
Node Type
Characteristics
+ *
+ *
{@link SnakeYamlOps}
+ *
SnakeYAML
+ *
{@code Object} (native Java types)
+ *
Native YAML library, uses Maps/Lists, anchors & aliases support
+ *
+ *
+ *
{@link JacksonYamlOps}
+ *
Jackson Dataformat YAML
+ *
{@code JsonNode}
+ *
Unified Jackson API, shares code with JSON, streaming support
+ *
+ *
+ *
+ *
Parameters
+ *
+ *
Parameter
Values
Description
+ *
payloadSize
SMALL, MEDIUM, LARGE
Test data complexity
+ *
+ *
+ *
Benchmark Configuration
+ *
+ *
Setting
Value
+ *
Warmup
5 iterations, 1 second each
+ *
Measurement
10 iterations, 1 second each
+ *
Forks
2
+ *
JVM Heap
2 GB min/max
+ *
Time Unit
Microseconds
+ *
+ *
+ *
Usage
+ *
{@code
+ * # Run all YAML benchmarks
+ * java -jar benchmarks.jar YamlBenchmark
+ *
+ * # Compare only generation performance
+ * java -jar benchmarks.jar "YamlBenchmark.*Generate"
+ *
+ * # Run SnakeYAML-only benchmarks
+ * java -jar benchmarks.jar "YamlBenchmark.snakeYaml.*"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar YamlBenchmark -p payloadSize=MEDIUM
+ * }
*
* @author Erik Pförtner
+ * @see JsonBenchmark
+ * @see TomlXmlBenchmark
+ * @see CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.codec.yaml.snakeyaml.SnakeYamlOps
+ * @see de.splatgames.aether.datafixers.codec.yaml.jackson.JacksonYamlOps
* @since 1.0.0
*/
@BenchmarkMode({Mode.Throughput, Mode.AverageTime})
@@ -64,104 +149,176 @@
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
public class YamlBenchmark {
+ /**
+ * Field name used for read/write benchmarks.
+ *
+ *
+ * References the first string field generated by {@link BenchmarkDataGenerator}.
+ */
+ private static final String FIELD_NAME = "stringField0";
+
+ /**
+ * Payload size parameter controlling test data complexity.
+ *
+ *
+ * Injected by JMH to run benchmarks across different data sizes.
+ */
@Param({"SMALL", "MEDIUM", "LARGE"})
private PayloadSize payloadSize;
+ /**
+ * SnakeYAML DynamicOps implementation using native Java types.
+ */
+ private SnakeYamlOps snakeOps;
+
+ /**
+ * Jackson YAML DynamicOps implementation using JsonNode.
+ */
+ private JacksonYamlOps jacksonOps;
+
+ /**
+ * Pre-generated test data using SnakeYAML.
+ */
private Dynamic snakeYamlData;
+
+ /**
+ * Pre-generated test data using Jackson YAML.
+ */
private Dynamic jacksonYamlData;
+
+ /**
+ * DataFixer for migration benchmarks.
+ */
private DataFixer fixer;
+
+ /**
+ * Source version for migrations (v1).
+ */
private DataVersion fromVersion;
+
+ /**
+ * Target version for migrations (v2).
+ */
private DataVersion toVersion;
+ /**
+ * Initializes DynamicOps instances, test data, and DataFixer.
+ *
+ *
+ * Both SnakeYAML and Jackson YAML data are pre-generated to isolate
+ * benchmark measurements from data creation overhead.
+ * Measures the time to create a complete test data structure using
+ * SnakeYAML's native Java type representation (Maps and Lists).
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void snakeYamlGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- SnakeYamlOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.snakeOps, this.payloadSize);
blackhole.consume(data);
}
/**
- * Benchmarks Jackson YAML data generation.
+ * Benchmarks Dynamic object generation using JacksonYamlOps.
+ *
+ *
+ * Measures the time to create a complete test data structure using
+ * Jackson's JsonNode representation for YAML.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonYamlGenerate(final Blackhole blackhole) {
- final Dynamic data = BenchmarkDataGenerator.generate(
- JacksonYamlOps.INSTANCE, this.payloadSize);
+ final Dynamic data = BenchmarkDataGenerator.generate(this.jacksonOps, this.payloadSize);
blackhole.consume(data);
}
- // ==================== Field Access ====================
+ // ==================== Field Access Benchmarks ====================
/**
- * Benchmarks SnakeYAML field read access.
+ * Benchmarks field read access on SnakeYAML-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * SnakeYAML-based Dynamic object (backed by Java Map).
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void snakeYamlFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.snakeYamlData.get("stringField0");
+ final Dynamic field = this.snakeYamlData.get(FIELD_NAME);
blackhole.consume(field);
}
/**
- * Benchmarks Jackson YAML field read access.
+ * Benchmarks field read access on Jackson YAML-backed Dynamic.
+ *
+ *
+ * Measures the time to retrieve a single field from a pre-existing
+ * Jackson YAML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonYamlFieldRead(final Blackhole blackhole) {
- final Dynamic field = this.jacksonYamlData.get("stringField0");
+ final Dynamic field = this.jacksonYamlData.get(FIELD_NAME);
blackhole.consume(field);
}
- // ==================== Field Modification ====================
+ // ==================== Field Modification Benchmarks ====================
/**
- * Benchmarks SnakeYAML field set operation.
+ * Benchmarks field set operation on SnakeYAML-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to a SnakeYAML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void snakeYamlFieldSet(final Blackhole blackhole) {
final Dynamic result = this.snakeYamlData.set(
- "newField", this.snakeYamlData.createString("newValue"));
+ "newField",
+ this.snakeYamlData.createString("newValue")
+ );
blackhole.consume(result);
}
/**
- * Benchmarks Jackson YAML field set operation.
+ * Benchmarks field set operation on Jackson YAML-backed Dynamic.
+ *
+ *
+ * Measures the time to add a new field to a Jackson YAML-based Dynamic object.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@Benchmark
public void jacksonYamlFieldSet(final Blackhole blackhole) {
final Dynamic result = this.jacksonYamlData.set(
- "newField", this.jacksonYamlData.createString("newValue"));
+ "newField",
+ this.jacksonYamlData.createString("newValue")
+ );
blackhole.consume(result);
}
- // ==================== Migration ====================
+ // ==================== Migration Benchmarks ====================
/**
- * Benchmarks migration with SnakeYAML DynamicOps.
+ * Benchmarks DataFixer migration on SnakeYAML-backed data.
+ *
+ *
+ * Measures the time to apply a single fix migration to SnakeYAML-based
+ * Dynamic data.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -171,12 +328,16 @@ public void snakeYamlMigration(final Blackhole blackhole) {
BenchmarkBootstrap.BENCHMARK_TYPE,
this.snakeYamlData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
/**
- * Benchmarks migration with Jackson YAML DynamicOps.
+ * Benchmarks DataFixer migration on Jackson YAML-backed data.
+ *
+ *
+ * Measures the time to apply a single fix migration to Jackson YAML-based
+ * Dynamic data.
*
* @param blackhole JMH blackhole to prevent dead code elimination
*/
@@ -186,7 +347,8 @@ public void jacksonYamlMigration(final Blackhole blackhole) {
BenchmarkBootstrap.BENCHMARK_TYPE,
this.jacksonYamlData,
this.fromVersion,
- this.toVersion);
+ this.toVersion
+ );
blackhole.consume(result);
}
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java
new file mode 100644
index 0000000..d2c5b40
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/package-info.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Format-focused JMH benchmarks comparing DynamicOps implementations in the Aether DataFixers framework.
+ *
+ *
+ * This package contains benchmarks that compare the performance of different serialization
+ * format implementations. These benchmarks help users choose the optimal DynamicOps implementation
+ * for their specific use case based on empirical performance data.
Each format benchmark measures the following operations:
+ *
+ *
Data Generation: Time to create Dynamic objects from scratch
+ *
Field Read: Time to retrieve a single field from existing data
+ *
Field Set: Time to add/modify a field (creates new immutable structure)
+ *
Migration: Time to apply a DataFix to format-specific data
+ *
+ *
+ *
Running Format Benchmarks
+ *
{@code
+ * # Run all format benchmarks
+ * java -jar benchmarks.jar ".*format.*"
+ *
+ * # Run specific format benchmark
+ * java -jar benchmarks.jar JsonBenchmark
+ * java -jar benchmarks.jar YamlBenchmark
+ * java -jar benchmarks.jar TomlXmlBenchmark
+ * java -jar benchmarks.jar CrossFormatBenchmark
+ *
+ * # Run all JSON-related benchmarks
+ * java -jar benchmarks.jar ".*Json.*"
+ *
+ * # Run generation benchmarks across all formats
+ * java -jar benchmarks.jar ".*Benchmark.*Generate"
+ *
+ * # Run migration benchmarks across all formats
+ * java -jar benchmarks.jar ".*Benchmark.*Migration"
+ *
+ * # Run with specific payload size
+ * java -jar benchmarks.jar ".*format.*" -p payloadSize=MEDIUM
+ * }
+ *
+ *
Choosing a DynamicOps Implementation
+ *
Use these benchmark results to guide implementation selection:
+ *
+ *
Scenario
Recommended
Rationale
+ *
+ *
General JSON processing
+ *
GsonOps or JacksonJsonOps
+ *
Compare benchmarks; both are mature and fast
+ *
+ *
+ *
Configuration files (YAML)
+ *
SnakeYamlOps
+ *
Native YAML features (anchors, aliases)
+ *
+ *
+ *
Mixed Jackson ecosystem
+ *
JacksonJsonOps/JacksonYamlOps
+ *
Shared code, faster cross-format conversion
+ *
+ *
+ *
TOML configuration
+ *
JacksonTomlOps
+ *
Only TOML option; good for Rust interop
+ *
+ *
+ *
Legacy XML systems
+ *
JacksonXmlOps
+ *
Only XML option; document format support
+ *
+ *
+ *
+ *
Cross-Format Conversion
+ *
The {@link de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark}
+ * measures conversion overhead between formats. Key insights:
+ *
+ *
Same-ecosystem: Jackson JSON ↔ Jackson YAML is fastest (shared JsonNode)
+ *
Cross-ecosystem: Gson ↔ SnakeYAML requires full tree traversal
+ *
Asymmetry: A→B may differ from B→A due to construction costs
+ *
+ *
+ *
Interpreting Results
+ *
+ *
Throughput: Higher ops/sec is better for high-volume scenarios
+ *
Average time: Lower latency is better for interactive applications
+ *
Scaling: Compare SMALL vs MEDIUM vs LARGE to understand data volume impact
+ *
Variance: High ± values may indicate GC sensitivity or JIT instability
+ *
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.format.JsonBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.format.YamlBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.format.TomlXmlBenchmark
+ * @see de.splatgames.aether.datafixers.benchmarks.format.CrossFormatBenchmark
+ * @see de.splatgames.aether.datafixers.api.dynamic.DynamicOps
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.format;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java
new file mode 100644
index 0000000..be94d9b
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/package-info.java
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * JMH benchmark suite for the Aether DataFixers framework.
+ *
+ *
+ * This package and its sub-packages provide comprehensive performance benchmarks
+ * for all major components of the Aether DataFixers system. The benchmarks use
+ * JMH (Java Microbenchmark Harness)
+ * for accurate, reliable performance measurements.
Creates fixers with varying numbers of fixes to measure migration
- * chain performance. All fixes use {@link NoOpDataFixerContext} to minimize
- * logging overhead during benchmarks.
+ *
+ * This utility class provides various DataFixer configurations for measuring
+ * different aspects of migration performance. All created fixers use {@link NoOpDataFixerContext} to eliminate logging
+ * overhead during benchmark measurements.
+ *
+ *
Available Fixer Configurations
+ *
+ *
Method
Fix Count
Fix Types
Use Case
+ *
+ *
{@link #createSingleFixFixer()}
+ *
1
+ *
Rename
+ *
Baseline single-operation performance
+ *
+ *
+ *
{@link #createIdentityFixer()}
+ *
1
+ *
Identity (no-op)
+ *
Framework overhead measurement
+ *
+ *
+ *
{@link #createChainFixer(int)}
+ *
1-100
+ *
Rename (homogeneous)
+ *
Chain length scaling analysis
+ *
+ *
+ *
{@link #createMixedFixer(int)}
+ *
4+
+ *
Rename, Add, Remove, Transform
+ *
Realistic migration scenarios
+ *
+ *
+ *
{@link #createPlayerFixer()}
+ *
4
+ *
Mixed (realistic)
+ *
Domain-specific migration testing
+ *
+ *
+ *
+ *
Type References
+ *
+ * Two type references are provided for categorizing benchmark data:
+ *
+ *
{@link #BENCHMARK_TYPE} - Generic benchmark data (used by most benchmarks)
+ *
{@link #PLAYER_TYPE} - Player-like data structures (for domain-specific tests)
+ *
+ *
+ *
Design Considerations
+ *
+ *
No-op context: All fixers use {@link NoOpDataFixerContext} to prevent
+ * logging from affecting benchmark measurements
+ *
GsonOps: All fixes use {@link GsonOps} as the reference DynamicOps
+ * implementation for consistency
+ *
Testkit integration: Uses {@link QuickFix} from the testkit module
+ * for efficient fix creation
*
* @author Erik Pförtner
+ * @see BenchmarkDataGenerator
+ * @see PayloadSize
+ * @see de.splatgames.aether.datafixers.testkit.factory.QuickFix
* @since 1.0.0
*/
public final class BenchmarkBootstrap {
/**
- * Type reference for benchmark data.
+ * Type reference for generic benchmark data.
+ *
+ *
+ * Used by most benchmarks as the default type for test data. The type
+ * name "benchmark" is intentionally generic to avoid confusion with domain-specific types.
*/
public static final TypeReference BENCHMARK_TYPE = new TypeReference("benchmark");
/**
* Type reference for player-like benchmark data.
+ *
+ *
+ * Used by benchmarks that simulate game player data migrations,
+ * providing a realistic domain-specific testing scenario.
+ *
+ * @see #createPlayerFixer()
+ * @see BenchmarkDataGenerator#generatePlayerData(DynamicOps)
*/
public static final TypeReference PLAYER_TYPE = new TypeReference("player");
+ /**
+ * Private constructor to prevent instantiation.
+ */
private BenchmarkBootstrap() {
// Utility class
}
/**
- * Creates a DataFixer with a single rename field fix.
+ * Creates a DataFixer with a single field rename fix (v1 → v2).
+ *
+ *
+ * This is the simplest non-trivial fixer configuration, useful for
+ * measuring baseline single-operation performance. The fix renames a field from "oldName" to "newName".
+ *
+ *
+ * Version mapping: v1 → v2 (single step)
*
* @return a new DataFixer configured for single-fix benchmarks
+ * @see #createIdentityFixer()
*/
@NotNull
public static DataFixer createSingleFixFixer() {
@@ -79,10 +173,22 @@ public static DataFixer createSingleFixFixer() {
/**
* Creates a DataFixer with an identity fix (no-op transformation).
*
- *
- * Useful as a baseline to measure framework overhead without
- * actual data transformation.
+ *
+ * The identity fixer passes data through without modification, useful for
+ * measuring pure framework overhead including:
+ *
+ *
Version checking and fix selection
+ *
Dynamic wrapper creation and manipulation
+ *
DataResult monad operations
+ *
Type reference resolution
+ *
+ *
+ *
+ * Comparing identity fixer performance against {@link #createSingleFixFixer()}
+ * reveals the actual cost of field operations versus framework overhead.
*
- * @return a new DataFixer with identity fix
+ *
+ * Version mapping: v1 → v2 (no data changes)
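+ *
+ * A minimal comparison sketch (names from this class; version numbers assumed to
+ * match the v1 → v2 mapping described above):
+ * {@code
+ * DataFixer identity = BenchmarkBootstrap.createIdentityFixer();
+ * DataFixer singleFix = BenchmarkBootstrap.createSingleFixFixer();
+ * Dynamic data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.SMALL);
+ *
+ * // Same input, same version range: the timing difference approximates the cost of the rename itself.
+ * identity.update(BenchmarkBootstrap.BENCHMARK_TYPE, data, new DataVersion(1), new DataVersion(2));
+ * singleFix.update(BenchmarkBootstrap.BENCHMARK_TYPE, data, new DataVersion(1), new DataVersion(2));
+ * }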
+ *
+ * @return a new DataFixer with an identity (pass-through) fix
+ * @see #createSingleFixFixer()
*/
@NotNull
public static DataFixer createIdentityFixer() {
@@ -93,14 +199,29 @@ public static DataFixer createIdentityFixer() {
}
/**
- * Creates a DataFixer with a chain of sequential fixes.
+ * Creates a DataFixer with a chain of sequential homogeneous fixes.
+ *
+ *
+ * Each fix in the chain performs a field rename operation (field1 → field2,
+ * field2 → field3, etc.), simulating migration scenarios with multiple consecutive version upgrades. This
+ * configuration is ideal for measuring how migration performance scales with chain length.
*
- *
- * Each fix in the chain performs a field rename operation,
- * simulating real-world migration scenarios with multiple version upgrades.
*
- * @param fixCount the number of fixes in the chain (1 to 100)
- * @return a new DataFixer with the specified number of fixes
+ *
Typical Parameter Values for Benchmarks
+ *
+ *
fixCount
Scenario
+ *
1
Baseline (compare with {@link #createSingleFixFixer()})
+ *
5
Short chain (minor version updates)
+ *
10
Medium chain (typical upgrade path)
+ *
25
Long chain (significant version gap)
+ *
50
Stress test (extended migration)
+ *
100
Maximum supported (extreme case)
+ *
+ *
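+ * A short usage sketch (names from this module; the target version is assumed to be
+ * {@code fixCount + 1}, one version step per fix):
+ * {@code
+ * DataFixer fixer = BenchmarkBootstrap.createChainFixer(10);
+ * Dynamic data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
+ *
+ * // Walks the full chain: v1 -> v2 -> ... -> v11
+ * Dynamic migrated = fixer.update(BenchmarkBootstrap.BENCHMARK_TYPE, data, new DataVersion(1), new DataVersion(11));
+ * }
+ *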
+ * @param fixCount the number of fixes in the chain (must be between 1 and 100 inclusive)
+ * @return a new DataFixer with the specified number of sequential rename fixes
* @throws IllegalArgumentException if fixCount is less than 1 or greater than 100
+ * @see #createMixedFixer(int)
*/
@NotNull
public static DataFixer createChainFixer(final int fixCount) {
@@ -126,13 +247,29 @@ public static DataFixer createChainFixer(final int fixCount) {
}
/**
- * Creates a DataFixer with mixed fix types for realistic benchmarking.
+ * Creates a DataFixer with mixed heterogeneous fix types for realistic benchmarking.
+ *
+ *
+ * Unlike {@link #createChainFixer(int)} which uses only rename operations,
+ * this method creates a chain with rotating fix types that more accurately represent real-world migration
+ * scenarios:
*
- *
- * Includes rename, add field, remove field, and transform operations
- * to simulate a realistic migration chain.
+ *
+ *
Position (mod 4)
Fix Type
Operation
+ *
0
Rename
Renames a field
+ *
1
Add
Adds a new string field with default value
+ *
2
Remove
Removes a field
+ *
3
Transform
Transforms field value (string concatenation)
+ *
*
- * @param fixCount the number of fixes in the chain (must be >= 4)
- * @return a new DataFixer with mixed fix types
+ *
+ * Comparing mixed fixer performance against chain fixer performance
+ * reveals the relative cost of different fix operations.
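+ *
+ * A usage sketch (names from this module; the version range is assumed to span one step per fix):
+ * {@code
+ * DataFixer mixed = BenchmarkBootstrap.createMixedFixer(8);
+ * Dynamic data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
+ * Dynamic migrated = mixed.update(BenchmarkBootstrap.BENCHMARK_TYPE, data, new DataVersion(1), new DataVersion(9));
+ * }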
+ *
+ * @param fixCount the number of fixes in the chain (must be at least 4 to include all fix types)
+ * @return a new DataFixer with mixed fix types cycling through rename, add, remove, and transform operations
+ * @throws IllegalArgumentException if fixCount is less than 4
+ * @see #createChainFixer(int)
*/
@NotNull
public static DataFixer createMixedFixer(final int fixCount) {
@@ -156,7 +293,23 @@ public static DataFixer createMixedFixer(final int fixCount) {
/**
* Creates a DataFixer for player data migration benchmarks.
*
- * @return a new DataFixer configured for player data
+ *
+ * This fixer simulates a realistic game player data migration scenario
+ * with four sequential fixes representing typical schema evolution:
+ *
+ *
+ *
Version
Fix
Description
+ *
v1 → v2
Rename
{@code name} → {@code playerName}
+ *
v2 → v3
Add
Add {@code score} field (default: 0)
+ *
v3 → v4
Transform
Double the {@code level} value
+ *
v4 → v5
Remove
Remove {@code tempField}
+ *
+ *
+ *
+ * Use with {@link BenchmarkDataGenerator#generatePlayerData(DynamicOps)} for
+ * complete domain-specific migration testing.
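+ *
+ * A usage sketch combining both helpers (the v1 → v5 range follows the table above):
+ * {@code
+ * DataFixer playerFixer = BenchmarkBootstrap.createPlayerFixer();
+ * Dynamic playerData = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ * Dynamic migrated = playerFixer.update(BenchmarkBootstrap.PLAYER_TYPE, playerData, new DataVersion(1), new DataVersion(5));
+ * }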
+ *
+ * @return a new DataFixer configured for player data migrations (v1 → v5)
+ * @see #PLAYER_TYPE
+ * @see BenchmarkDataGenerator#generatePlayerData(DynamicOps)
*/
@NotNull
public static DataFixer createPlayerFixer() {
@@ -178,11 +331,20 @@ public static DataFixer createPlayerFixer() {
.build();
}
- private static DataFix createMixedFix(
- final int fromVersion,
- final int toVersion,
- final int fixType
- ) {
+ /**
+ * Creates a specific fix type based on the fixType selector.
+ *
+ *
+ * Internal factory method used by {@link #createMixedFixer(int)} to create
+ * different fix types in a rotating pattern.
+ *
+ * @param fromVersion the source version for the fix
+ * @param toVersion the target version for the fix
+ * @param fixType the fix type selector (0=rename, 1=add, 2=remove, 3=transform)
+ * @return a DataFix of the specified type
+ */
+ private static DataFix createMixedFix(final int fromVersion,
+ final int toVersion,
+ final int fixType) {
return switch (fixType) {
case 0 -> QuickFix.renameField(
GsonOps.INSTANCE,
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
index 7f48696..e3b635e 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
@@ -29,41 +29,125 @@
import org.jetbrains.annotations.NotNull;
/**
- * Utility class for generating benchmark test data.
+ * Factory for generating benchmark test data with configurable complexity.
*
- *
- * Generates {@link Dynamic} objects with configurable complexity based on
- * {@link PayloadSize} settings. Uses the testkit's {@link TestDataBuilder}
- * for efficient, format-agnostic data construction.
+ *
+ * This utility class creates {@link Dynamic} objects of varying sizes and
+ * structures for use in JMH benchmarks. Data generation is format-agnostic, working with any {@link DynamicOps}
+ * implementation.
+ *
+ *
Data Generation Methods
+ *
+ *
Method
Structure
Use Case
+ *
+ *
{@link #generate(DynamicOps, PayloadSize)}
+ *
Complex (fields + nesting + lists)
+ *
General-purpose benchmarks
+ *
+ *
+ *
{@link #generatePlayerData(DynamicOps)}
+ *
Domain-specific (player data)
+ *
Realistic migration scenarios
+ *
+ *
+ *
{@link #generateFlat(DynamicOps, int)}
+ *
Flat object (fields only)
+ *
Basic operation benchmarks
+ *
+ *
+ *
+ *
Generated Data Structure
+ *
The main {@link #generate(DynamicOps, PayloadSize)} method creates objects with:
Testkit integration: Uses {@link TestDataBuilder} for fluent,
+ * type-safe data construction
+ *
Format agnostic: Works with any DynamicOps (Gson, Jackson, YAML, etc.)
+ *
Deterministic: Generated data is reproducible for benchmark consistency
+ * (except timestamp fields)
+ *
Configurable complexity: {@link PayloadSize} controls data volume
+ *
+ *
+ *
Usage Example
+ *
{@code
+ * // In a JMH benchmark
+ * @Setup(Level.Iteration)
+ * public void setup() {
+ * // Generate medium-complexity test data
+ * this.input = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, PayloadSize.MEDIUM);
+ *
+ * // Or generate player-specific data
+ * this.playerData = BenchmarkDataGenerator.generatePlayerData(GsonOps.INSTANCE);
+ * }
+ * }
*
* @author Erik Pförtner
+ * @see PayloadSize
+ * @see BenchmarkBootstrap
+ * @see de.splatgames.aether.datafixers.testkit.TestDataBuilder
* @since 1.0.0
*/
public final class BenchmarkDataGenerator {
+ /**
+ * Private constructor to prevent instantiation.
+ */
private BenchmarkDataGenerator() {
// Utility class
}
/**
- * Generates benchmark data with the specified payload size.
+ * Generates benchmark data with the specified payload size and complexity.
*
- *
Creates a complex object structure including:
+ *
Creates a complex object structure including:
*
- *
Primitive fields (strings, integers, booleans)
- *
Nested objects up to the configured depth
- *
A list with the configured number of items
+ *
Primitive fields: String, integer, and boolean fields based on
+ * {@link PayloadSize#getFieldCount()}
+ *
Nested objects: Recursive nesting up to
+ * {@link PayloadSize#getNestingDepth()} levels
+ *
List with items: An "items" array with
+ * {@link PayloadSize#getListSize()} objects
*
*
- * @param ops the DynamicOps to use for data creation
- * @param size the payload size configuration
- * @param the underlying value type
- * @return a new Dynamic containing the generated data
+ *
Field Naming Patterns
+ *
+ *
Field Type
Pattern
Example
+ *
String
{@code stringFieldN}
{@code stringField0: "value0"}
+ *
Integer
{@code intFieldN}
{@code intField0: 0}
+ *
Boolean
{@code boolFieldN}
{@code boolField0: true}
+ *
+ *
+ * @param ops the DynamicOps implementation to use for data creation
+ * @param size the payload size configuration controlling data complexity
+ * @param the underlying value type of the DynamicOps
+ * @return a new Dynamic containing the generated benchmark data
*/
@NotNull
- public static Dynamic generate(
- @NotNull final DynamicOps ops,
- @NotNull final PayloadSize size
- ) {
+ public static Dynamic generate(@NotNull final DynamicOps ops,
+ @NotNull final PayloadSize size) {
final TestDataBuilder builder = TestData.using(ops).object();
// Add primitive fields
@@ -93,18 +177,39 @@ public static Dynamic generate(
/**
* Generates a player-like data structure for realistic migration benchmarks.
*
- *
Creates a structure similar to game player data with:
- *
- *
Identity fields (id, name)
- *
Stats (level, experience, health)
- *
Position object (x, y, z, world)
- *
Inventory list
- *
Achievements list
- *
+ *
+ * Creates a structure simulating game player data, useful for domain-specific
+ * migration testing with {@link BenchmarkBootstrap#createPlayerFixer()}.
*
- * @param ops the DynamicOps to use for data creation
- * @param the underlying value type
- * @return a new Dynamic containing player-like data
+ *
Data Characteristics
+ *
+ *
Component
Count
Description
+ *
Top-level fields
6
id, name, level, experience, health, active
+ *
Nested objects
2
position (4 fields), stats (4 fields)
+ *
Inventory slots
36
Standard inventory size
+ *
Achievements
6
String list
+ *
+ *
+ * @param ops the DynamicOps implementation to use for data creation
+ * @param the underlying value type of the DynamicOps
+ * @return a new Dynamic containing player-like benchmark data
+ * @see BenchmarkBootstrap#createPlayerFixer()
+ * @see BenchmarkBootstrap#PLAYER_TYPE
*/
@NotNull
public static Dynamic generatePlayerData(@NotNull final DynamicOps ops) {
@@ -147,18 +252,32 @@ public static Dynamic generatePlayerData(@NotNull final DynamicOps ops
}
/**
- * Generates a simple flat object for basic operation benchmarks.
+ * Generates a simple flat object with only string fields.
+ *
+ *
+ * Creates a minimal object structure without nesting or lists, useful for
+ * benchmarking basic field access and manipulation operations with minimal traversal overhead.
*
- * @param ops the DynamicOps to use for data creation
- * @param fieldCount the number of fields to generate
- * @param the underlying value type
- * @return a new Dynamic containing flat data
+ *
+ * This method is useful for isolating field operation costs from
+ * structural complexity overhead.
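+ *
+ * A minimal sketch (field naming follows the {@code fieldN}/{@code valueN} pattern used by this method):
+ * {@code
+ * Dynamic flat = BenchmarkDataGenerator.generateFlat(GsonOps.INSTANCE, 10);
+ * Dynamic field = flat.get("field0"); // reads the first generated string field
+ * }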
+ *
+ * @param ops the DynamicOps implementation to use for data creation
+ * @param fieldCount the number of string fields to generate (field0 through field(n-1))
+ * @param the underlying value type of the DynamicOps
+ * @return a new Dynamic containing a flat object with string fields
*/
@NotNull
- public static Dynamic generateFlat(
- @NotNull final DynamicOps ops,
- final int fieldCount
- ) {
+ public static Dynamic generateFlat(@NotNull final DynamicOps ops,
+ final int fieldCount) {
final TestDataBuilder builder = TestData.using(ops).object();
for (int i = 0; i < fieldCount; i++) {
builder.put("field" + i, "value" + i);
@@ -166,11 +285,25 @@ public static Dynamic generateFlat(
return builder.build();
}
- private static void addNestedObject(
- final TestDataBuilder builder,
- final String key,
- final int depth
- ) {
+ /**
+ * Recursively adds nested object structures to the builder.
+ *
+ *
+ * Creates a chain of nested objects, each containing:
+ *
+ *
{@code level} - the current nesting depth
+ *
{@code data} - a string identifying the nesting level
+ *
{@code timestamp} - current system time (for data variation)
+ *
{@code child} - the next nested level (if depth > 0)
+ *
+ *
+ * @param builder the TestDataBuilder to add the nested structure to
+ * @param key the field name for this nested object
+ * @param depth remaining nesting levels (stops when depth reaches 0)
+ * @param the underlying value type of the builder
+ */
+ private static void addNestedObject(final TestDataBuilder builder,
+ final String key,
+ final int depth) {
if (depth <= 0) {
return;
}
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
index 82fe8a3..13b56f3 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/PayloadSize.java
@@ -23,42 +23,144 @@
package de.splatgames.aether.datafixers.benchmarks.util;
/**
- * Defines payload sizes for benchmark test data generation.
+ * Defines payload size configurations for benchmark test data generation.
+ *
+ *
+ * This enum controls the complexity of data generated by
+ * {@link BenchmarkDataGenerator#generate(de.splatgames.aether.datafixers.api.dynamic.DynamicOps, PayloadSize)}.
+ * Each configuration specifies three dimensions of data complexity:
*
- *
Each size configuration controls the complexity of generated test data:
*
- *
SMALL - Quick benchmarks, minimal data (5 fields, 2 nesting levels, 10 list items)
- *
MEDIUM - Balanced benchmarks (20 fields, 4 nesting levels, 100 list items)
- *
LARGE - Stress testing (50 fields, 6 nesting levels, 1000 list items)
+ *
Field count: Number of primitive fields (string, int, boolean triplets)
+ *
Nesting depth: Levels of nested object recursion
+ *
List size: Number of items in the generated list
*
*
+ *
Configuration Summary
+ *
+ *
+ *
Size
+ *
Fields
+ *
Nesting
+ *
List Items
+ *
Use Case
+ *
+ *
+ *
{@link #SMALL}
+ *
5
+ *
2 levels
+ *
10
+ *
Quick iterations, CI pipelines
+ *
+ *
+ *
{@link #MEDIUM}
+ *
20
+ *
4 levels
+ *
100
+ *
Typical performance testing
+ *
+ *
+ *
{@link #LARGE}
+ *
50
+ *
6 levels
+ *
1000
+ *
Stress testing, worst-case analysis
+ *
+ *
+ *
+ *
JMH Parameterization
+ *
+ * This enum is designed for use with JMH's {@code @Param} annotation:
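+ *
+ * A minimal sketch of a parameterized benchmark field, mirroring the format benchmarks in this suite:
+ * {@code
+ * @Param({"SMALL", "MEDIUM", "LARGE"})
+ * private PayloadSize payloadSize;
+ *
+ * @Setup(Level.Trial)
+ * public void setup() {
+ *     this.data = BenchmarkDataGenerator.generate(GsonOps.INSTANCE, this.payloadSize);
+ * }
+ * }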
+ *
* @author Erik Pförtner
+ * @see BenchmarkDataGenerator
* @since 1.0.0
*/
public enum PayloadSize {
/**
- * Small payload: 5 fields, 2 nesting levels, 10 list items.
- * Suitable for quick benchmark iterations.
+ * Small payload configuration for quick benchmark iterations.
+ *
+ *
Generates minimal data suitable for:
+ *
+ *
Rapid development feedback loops
+ *
CI/CD pipeline validation
+ *
Baseline measurements with minimal GC impact
+ *
+ *
+ *
Configuration: 5 fields, 2 nesting levels, 10 list items
*/
SMALL(5, 2, 10),
/**
- * Medium payload: 20 fields, 4 nesting levels, 100 list items.
- * Balanced for typical performance testing.
+ * Medium payload configuration for balanced performance testing.
+ *
+ *
Generates moderately complex data suitable for:
+ *
+ *
Standard benchmark runs
+ *
Typical real-world data volume simulation
+ *
Comparing different implementations
+ *
+ *
+ *
Configuration: 20 fields, 4 nesting levels, 100 list items
*/
MEDIUM(20, 4, 100),
/**
- * Large payload: 50 fields, 6 nesting levels, 1000 list items.
- * Suitable for stress testing and worst-case analysis.
+ * Large payload configuration for stress testing and worst-case analysis.
+ *
+ *
Generates substantial data suitable for:
+ *
+ *
Memory pressure and GC behavior analysis
+ *
Worst-case performance scenarios
+ *
Scalability limit identification
+ *
+ *
+ *
Configuration: 50 fields, 6 nesting levels, 1000 list items
+ *
+ *
Note: Large payloads may require increased heap size and longer
+ * warmup periods for stable measurements.
*/
LARGE(50, 6, 1000);
+ /**
+ * Number of primitive field triplets (string, int, boolean) to generate.
+ */
private final int fieldCount;
+
+ /**
+ * Maximum depth of nested object recursion.
+ */
private final int nestingDepth;
+
+ /**
+ * Number of items in the generated list.
+ */
private final int listSize;
+ /**
+ * Constructs a payload size configuration.
+ *
+ * @param fieldCount number of top-level field triplets
+ * @param nestingDepth maximum nesting levels for nested objects
+ * @param listSize number of items in generated lists
+ */
PayloadSize(final int fieldCount, final int nestingDepth, final int listSize) {
this.fieldCount = fieldCount;
this.nestingDepth = nestingDepth;
@@ -66,27 +168,39 @@ public enum PayloadSize {
}
/**
- * Returns the number of top-level fields to generate.
+ * Returns the number of primitive field triplets to generate.
*
- * @return the field count
+ *
+ * Each field "count" results in three actual fields:
+ *
+ *
{@code stringFieldN} - String value
+ *
{@code intFieldN} - Integer value
+ *
{@code boolFieldN} - Boolean value
+ *
+ *
+ * @return the number of field triplets (total fields = fieldCount × 3)
*/
public int getFieldCount() {
return this.fieldCount;
}
/**
- * Returns the maximum nesting depth for nested objects.
+ * Returns the maximum nesting depth for recursive nested objects.
+ *
+ *
+ * A depth of N creates N levels of nested objects, each containing
+ * a "child" field pointing to the next level until depth reaches 0.
*
- * @return the nesting depth
+ * @return the maximum nesting depth (0 = no nesting)
*/
public int getNestingDepth() {
return this.nestingDepth;
}
/**
- * Returns the number of items to generate in lists.
+ * Returns the number of items to generate in the "items" list.
+ *
+ *
+ * Each item is an object with id, quantity, and active fields.
*
- * @return the list size
+ * @return the number of list items
*/
public int getListSize() {
return this.listSize;
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java
new file mode 100644
index 0000000..5673ed4
--- /dev/null
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/package-info.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2026 Splatgames.de Software and Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * Utility classes for JMH benchmark infrastructure in the Aether DataFixers framework.
+ *
+ *
+ * This package provides the foundational components that all benchmark classes depend on
+ * for test data generation, DataFixer configuration, and payload management. These utilities
+ * ensure consistent, reproducible benchmark conditions across different benchmark categories.
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap} provides
+ * several DataFixer configurations for different benchmark scenarios:
+ *
+ *
Configuration
Fix Count
Purpose
+ *
Single Fix
1
Baseline single-operation performance
+ *
Identity
1 (no-op)
Framework overhead measurement
+ *
Chain (N)
1-100
Chain length scaling analysis
+ *
Mixed (N)
4+
Realistic heterogeneous migrations
+ *
Player
4
Domain-specific scenario testing
+ *
+ *
+ *
Payload Size Configurations
+ *
{@link de.splatgames.aether.datafixers.benchmarks.util.PayloadSize} defines three
+ * complexity levels for generated test data:
+ *
+ *
Size
Fields
Nesting
List Items
Use Case
+ *
SMALL
5
2
10
Quick iterations, CI
+ *
MEDIUM
20
4
100
Standard testing
+ *
LARGE
50
6
1000
Stress testing
+ *
+ *
+ *
Integration with Testkit
+ *
This package builds upon the {@code aether-datafixers-testkit} module:
+ *
+ *
{@link de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator} uses
+ * {@code TestDataBuilder} for fluent data construction
Both utilities leverage {@code MockSchemas} for lightweight schema instances
+ *
+ *
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkBootstrap
+ * @see de.splatgames.aether.datafixers.benchmarks.util.BenchmarkDataGenerator
+ * @see de.splatgames.aether.datafixers.benchmarks.util.PayloadSize
+ * @see de.splatgames.aether.datafixers.testkit
+ * @since 1.0.0
+ */
+package de.splatgames.aether.datafixers.benchmarks.util;
From 0196f6d20a154ed18ab1a37f49dc9064395eed62 Mon Sep 17 00:00:00 2001
From: Erik
Date: Sat, 31 Jan 2026 17:23:53 +0100
Subject: [PATCH 09/10] Mark benchmark classes as `final` to prevent subclassing
 and improve clarity.
---
.../datafixers/benchmarks/codec/CollectionCodecBenchmark.java | 2 +-
.../datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java | 2 +-
.../benchmarks/concurrent/ConcurrentMigrationBenchmark.java | 2 +-
.../datafixers/benchmarks/core/MultiFixChainBenchmark.java | 2 +-
.../datafixers/benchmarks/core/SchemaLookupBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/core/SingleFixBenchmark.java | 2 +-
.../datafixers/benchmarks/format/CrossFormatBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/JsonBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/TomlXmlBenchmark.java | 2 +-
.../aether/datafixers/benchmarks/format/YamlBenchmark.java | 2 +-
10 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
index 56405aa..981d9ed 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -146,7 +146,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class CollectionCodecBenchmark {
+public final class CollectionCodecBenchmark {
/**
* The number of elements in test lists, injected by JMH.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
index 7e9c8da..15cd191 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -147,7 +147,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class PrimitiveCodecBenchmark {
+public final class PrimitiveCodecBenchmark {
/**
* Test boolean value for encoding benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
index a1830bf..6b905ec 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -157,7 +157,7 @@
@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class ConcurrentMigrationBenchmark {
+public final class ConcurrentMigrationBenchmark {
// ==================== Concurrent Migration Benchmarks ====================
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
index 2b3e535..d992f1a 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -112,7 +112,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class MultiFixChainBenchmark {
+public final class MultiFixChainBenchmark {
/**
* The number of fixes in the chain, injected by JMH.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
index 0b72395..daf9272 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -109,7 +109,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class SchemaLookupBenchmark {
+public final class SchemaLookupBenchmark {
/**
* Benchmarks exact version lookup performance.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
index c74d288..fad4f96 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -101,7 +101,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class SingleFixBenchmark {
+public final class SingleFixBenchmark {
/**
* Benchmarks a single field rename operation.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
index ac0bce9..2bc7504 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -173,7 +173,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class CrossFormatBenchmark {
+public final class CrossFormatBenchmark {
/**
* Payload size parameter controlling test data complexity.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
index d0f64b2..7cb3e74 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
@@ -149,7 +149,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class JsonBenchmark {
+public final class JsonBenchmark {
/**
* Field name used for read/write benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
index 2dc134c..675e4eb 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
@@ -150,7 +150,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class TomlXmlBenchmark {
+public final class TomlXmlBenchmark {
/**
* Field name used for read/write benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
index c0f2862..3e9009f 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
@@ -147,7 +147,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public class YamlBenchmark {
+public final class YamlBenchmark {
/**
* Field name used for read/write benchmarks.
From 2096a6101fdce842450efc8c284d5f345736b566 Mon Sep 17 00:00:00 2001
From: Erik
Date: Sat, 31 Jan 2026 17:31:57 +0100
Subject: [PATCH 10/10] Revert `final` on benchmark classes, apply `@Nullable`
 annotations consistently, and use a fixed timestamp in `BenchmarkDataGenerator`
 for reproducible benchmark data.
---
.../codec/CollectionCodecBenchmark.java | 2 +-
.../benchmarks/codec/PrimitiveCodecBenchmark.java | 2 +-
.../concurrent/ConcurrentMigrationBenchmark.java | 2 +-
.../benchmarks/core/MultiFixChainBenchmark.java | 2 +-
.../benchmarks/core/SchemaLookupBenchmark.java | 2 +-
.../benchmarks/core/SingleFixBenchmark.java | 2 +-
.../benchmarks/format/CrossFormatBenchmark.java | 2 +-
.../benchmarks/format/JsonBenchmark.java | 4 +++-
.../benchmarks/format/TomlXmlBenchmark.java | 2 +-
.../benchmarks/format/YamlBenchmark.java | 2 +-
.../benchmarks/util/BenchmarkDataGenerator.java | 15 +++++++++++----
11 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
index 981d9ed..56405aa 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/CollectionCodecBenchmark.java
@@ -146,7 +146,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class CollectionCodecBenchmark {
+public class CollectionCodecBenchmark {
/**
* The number of elements in test lists, injected by JMH.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
index 15cd191..7e9c8da 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/codec/PrimitiveCodecBenchmark.java
@@ -147,7 +147,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class PrimitiveCodecBenchmark {
+public class PrimitiveCodecBenchmark {
/**
* Test boolean value for encoding benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
index 6b905ec..a1830bf 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/concurrent/ConcurrentMigrationBenchmark.java
@@ -157,7 +157,7 @@
@Warmup(iterations = 3, time = 2, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class ConcurrentMigrationBenchmark {
+public class ConcurrentMigrationBenchmark {
// ==================== Concurrent Migration Benchmarks ====================
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
index d992f1a..2b3e535 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/MultiFixChainBenchmark.java
@@ -112,7 +112,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class MultiFixChainBenchmark {
+public class MultiFixChainBenchmark {
/**
* The number of fixes in the chain, injected by JMH.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
index daf9272..0b72395 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SchemaLookupBenchmark.java
@@ -109,7 +109,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class SchemaLookupBenchmark {
+public class SchemaLookupBenchmark {
/**
* Benchmarks exact version lookup performance.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
index fad4f96..c74d288 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/core/SingleFixBenchmark.java
@@ -101,7 +101,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class SingleFixBenchmark {
+public class SingleFixBenchmark {
/**
* Benchmarks a single field rename operation.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
index 2bc7504..ac0bce9 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/CrossFormatBenchmark.java
@@ -173,7 +173,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class CrossFormatBenchmark {
+public class CrossFormatBenchmark {
/**
* Payload size parameter controlling test data complexity.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
index 7cb3e74..1a87c58 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/JsonBenchmark.java
@@ -44,6 +44,7 @@
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
+import org.jetbrains.annotations.Nullable;
import org.openjdk.jmh.infra.Blackhole;
import java.util.concurrent.TimeUnit;
@@ -149,7 +150,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class JsonBenchmark {
+public class JsonBenchmark {
/**
* Field name used for read/write benchmarks.
@@ -197,6 +198,7 @@ public final class JsonBenchmark {
 * <p>May be {@code null} if no dedicated Jackson fixer is configured.
* In that case, cross-format migration behavior is measured instead.
*/
+ @Nullable
private DataFixer jacksonFixer;
/**
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
index 675e4eb..2dc134c 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/TomlXmlBenchmark.java
@@ -150,7 +150,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class TomlXmlBenchmark {
+public class TomlXmlBenchmark {
/**
* Field name used for read/write benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
index 3e9009f..c0f2862 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/format/YamlBenchmark.java
@@ -147,7 +147,7 @@
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 10, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2, jvmArgs = {"-Xms2G", "-Xmx2G"})
-public final class YamlBenchmark {
+public class YamlBenchmark {
/**
* Field name used for read/write benchmarks.
diff --git a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
index e3b635e..19a93cc 100644
--- a/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
+++ b/aether-datafixers-benchmarks/src/main/java/de/splatgames/aether/datafixers/benchmarks/util/BenchmarkDataGenerator.java
@@ -86,8 +86,7 @@
 * <li>Testkit integration: Uses {@link TestDataBuilder} for fluent,
 * type-safe data construction</li>
 * <li>Format agnostic: Works with any DynamicOps (Gson, Jackson, YAML, etc.)</li>
- * <li>Deterministic: Generated data is reproducible for benchmark consistency
- * (except timestamp fields)</li>
+ * <li>Deterministic: Generated data is fully reproducible for benchmark consistency</li>
 * <li>Configurable complexity: {@link PayloadSize} controls data volume</li>
 * </ul>
 *
@@ -112,6 +111,14 @@
*/
public final class BenchmarkDataGenerator {
+ /**
+ * Fixed timestamp value used for deterministic benchmark data generation.
+ *
+ * <p>Using a constant timestamp ensures reproducible benchmark results
+ * across different runs, eliminating variability from system time.
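+ *
+ * <p>For reference, {@code 1704067200000L} equals
+ * {@code Instant.parse("2024-01-01T00:00:00Z").toEpochMilli()}.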
+ */
+ private static final long FIXED_TIMESTAMP = 1704067200000L; // 2024-01-01 00:00:00 UTC
+
/**
* Private constructor to prevent instantiation.
*/
@@ -292,7 +299,7 @@ public static Dynamic generateFlat(@NotNull final DynamicOps ops,
*
 * <li>{@code level} - the current nesting depth</li>
 * <li>{@code data} - a string identifying the nesting level</li>
- * <li>{@code timestamp} - current system time (for data variation)</li>
+ * <li>{@code timestamp} - fixed timestamp for reproducibility</li>
 * <li>{@code child} - the next nested level (if depth > 0)</li>