diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index eca6eabb218..4e20472994b 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -20,6 +20,7 @@ jobs:
# the tag MAJOR.MINOR.PATCH event, but we still need to deploy the maven-release-plugin master commit.
token: ${{ secrets.GH_TOKEN }}
fetch-depth: 1 # only need the HEAD commit as license check isn't run
+ submodules: true
- name: Cache local Maven repository
uses: actions/cache@v2
with:
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 0454fe71e1e..8b5fe603222 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -23,6 +23,7 @@ jobs:
# We push Javadocs to the gh-pages branch on commit.
token: ${{ secrets.GH_TOKEN }}
fetch-depth: 0 # allow build-bin/idl_to_gh_pages to get the full history
+ submodules: true
- name: Cache local Maven repository
uses: actions/cache@v2
with:
diff --git a/.github/workflows/docker_push.yml b/.github/workflows/docker_push.yml
index 64a83bc54d0..71d21c6b3b1 100644
--- a/.github/workflows/docker_push.yml
+++ b/.github/workflows/docker_push.yml
@@ -17,6 +17,7 @@ jobs:
uses: actions/checkout@v2
with:
fetch-depth: 1 # only needed to get the sha label
+ submodules: true
# We can't cache Docker without using buildx because GH actions restricts /var/lib/docker
# That's ok because DOCKER_PARENT_IMAGE is always ghcr.io and local anyway.
- name: Docker Push
diff --git a/.github/workflows/helm_release.yml b/.github/workflows/helm_release.yml
index b315b0c27ba..3a80465b014 100644
--- a/.github/workflows/helm_release.yml
+++ b/.github/workflows/helm_release.yml
@@ -14,6 +14,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
+ with:
+ submodules: true
- name: Configure Git
run: |
diff --git a/.github/workflows/helm_test.yml b/.github/workflows/helm_test.yml
index 2b766e224bd..dfa26fcb4e4 100644
--- a/.github/workflows/helm_test.yml
+++ b/.github/workflows/helm_test.yml
@@ -16,6 +16,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
+ with:
+ submodules: true
- name: Configure Git
run: |
diff --git a/.github/workflows/readme_test.yml b/.github/workflows/readme_test.yml
index 10019bd6b54..fd81d155392 100644
--- a/.github/workflows/readme_test.yml
+++ b/.github/workflows/readme_test.yml
@@ -30,6 +30,7 @@ jobs:
uses: actions/checkout@v2
with:
fetch-depth: 1
+ submodules: true
# Setup latest JDK. We do this to ensure users don't need to use the same version as our
# release process. Release uses JDK 11, the last version that can target 1.6 bytecode.
- name: Setup java
@@ -82,7 +83,7 @@ jobs:
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-maven-
- name: zipkin-server/README.md # Tests the build which is re-used for a few images
- run: ./mvnw -T1C -q --batch-mode -DskipTests --also-make -pl zipkin-server clean package
+ run: ./mvnw -T1C -q --batch-mode -DskipTests -Dcheckstyle.skip=true --also-make -pl zipkin-server clean package
- name: docker/README.md - openzipkin/zipkin
run: |
build-bin/docker/docker_build openzipkin/zipkin:test &&
@@ -102,34 +103,4 @@ jobs:
build-bin/docker/docker_test_image openzipkin/zipkin-ui:test
env:
DOCKER_FILE: docker/test-images/zipkin-ui/Dockerfile
- RELEASE_FROM_MAVEN_BUILD: true
- - name: docker/test-images/zipkin-cassandra/README.md
- run: |
- build-bin/docker/docker_build openzipkin/zipkin-cassandra:test &&
- build-bin/docker/docker_test_image openzipkin/zipkin-cassandra:test
- env:
- DOCKER_FILE: docker/test-images/zipkin-cassandra/Dockerfile
- - name: docker/test-images/zipkin-elasticsearch6/README.md
- run: |
- build-bin/docker/docker_build openzipkin/zipkin-elasticsearch6:test &&
- build-bin/docker/docker_test_image openzipkin/zipkin-elasticsearch6:test
- env:
- DOCKER_FILE: docker/test-images/zipkin-elasticsearch6/Dockerfile
- - name: docker/test-images/zipkin-elasticsearch7/README.md
- run: |
- build-bin/docker/docker_build openzipkin/zipkin-elasticsearch7:test &&
- build-bin/docker/docker_test_image openzipkin/zipkin-elasticsearch7:test
- env:
- DOCKER_FILE: docker/test-images/zipkin-elasticsearch7/Dockerfile
- - name: docker/test-images/zipkin-kafka/README.md
- run: |
- build-bin/docker/docker_build openzipkin/zipkin-kafka:test &&
- build-bin/docker/docker_test_image openzipkin/zipkin-kafka:test
- env:
- DOCKER_FILE: docker/test-images/zipkin-kafka/Dockerfile
- - name: docker/test-images/zipkin-mysql/README.md
- run: |
- build-bin/docker/docker_build openzipkin/zipkin-mysql:test &&
- build-bin/docker/docker_test_image openzipkin/zipkin-mysql:test
- env:
- DOCKER_FILE: docker/test-images/zipkin-mysql/Dockerfile
+ RELEASE_FROM_MAVEN_BUILD: true
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
deleted file mode 100644
index ab4f765aa0e..00000000000
--- a/.github/workflows/test.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-# yamllint --format github .github/workflows/test.yml
----
-name: test
-
-# We don't test documentation-only commits.
-on:
- # We run tests on non-tagged pushes to master that aren't a commit made by the release plugin
- push:
- tags: ""
- branches: master
- paths-ignore:
- - "**/*.md"
- - "charts/**"
- # We also run tests on pull requests targeted at the master branch.
- pull_request:
- branches: master
- paths-ignore:
- - "**/*.md"
- - "charts/**"
-
-jobs:
- test:
- runs-on: ubuntu-20.04 # newest available distribution, aka focal
- if: "!contains(github.event.head_commit.message, 'maven-release-plugin')"
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v2
- with:
- fetch-depth: 0 # full git history for license check
- - name: Cache local Maven repository
- uses: actions/cache@v2
- with:
- path: ~/.m2/repository
- key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
- restore-keys: ${{ runner.os }}-maven-
- - name: Cache NPM Packages
- uses: actions/cache@v2
- with:
- path: ~/.npm
- key: ${{ runner.os }}-npm-packages-${{ hashFiles('zipkin-lens/package-lock.json') }}
- - name: Test without Docker
- run: build-bin/maven_go_offline && build-bin/test -Ddocker.skip=true
- test_docker:
- runs-on: ubuntu-20.04 # newest available distribution, aka focal
- if: "!contains(github.event.head_commit.message, 'maven-release-plugin')"
- strategy:
- matrix:
- include:
- - name: zipkin-collector-kafka
- - name: zipkin-collector-rabbitmq
- - name: zipkin-storage-cassandra
- - name: zipkin-storage-elasticsearch
- - name: zipkin-storage-mysql-v1
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v2
- with:
- fetch-depth: 1 # -Dlicense.skip=true so we don't need a full clone
- - name: Cache local Maven repository
- uses: actions/cache@v2
- with:
- path: ~/.m2/repository
- key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
- restore-keys: ${{ runner.os }}-maven-
- # We can't cache Docker without using buildx because GH actions restricts /var/lib/docker
- # That's ok because DOCKER_PARENT_IMAGE is always ghcr.io and local anyway.
- - name: Test with Docker
- run:
- | # configure_test seeds NPM cache, which isn't needed for these tests
- build-bin/maven/maven_go_offline &&
- build-bin/docker/configure_docker &&
- build-bin/test -pl :${{ matrix.name }} --am -Dlicense.skip=true
diff --git a/.gitignore b/.gitignore
index bc8776b02e7..0f257f47102 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,5 @@ _site/
# This project does not use Yarn but some developers may use it to e.g., start zipkin-lens dev server.
# It doesn't hurt to just exclude it here.
yarn.lock
+
+zipkin-server/server-starter/src/main/resources/version.properties
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000000..10610c8508d
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "skywalking"]
+ path = skywalking
+ url = https://github.com/apache/skywalking.git
diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index d43647bdb0a..fdc6412e56c 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -105,11 +105,6 @@
zipkin-server${project.version}
-
- ${project.groupId}.zipkin2
- zipkin-storage-elasticsearch
- ${project.version}
- com.squareup.wirewire-runtime
@@ -127,18 +122,6 @@
test
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-cassandra
- ${project.version}
-
-
- io.netty
- *
-
-
- com.datastax.ossjava-driver-core
@@ -151,12 +134,6 @@
-
- ${project.groupId}.zipkin2
- zipkin-storage-mysql-v1
- ${project.version}
- test
- org.mariadb.jdbcmariadb-java-client
diff --git a/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java b/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java
deleted file mode 100644
index 420b5c65113..00000000000
--- a/benchmarks/src/main/java/zipkin2/collector/MetricsBenchmarks.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import io.micrometer.core.instrument.MeterRegistry;
-import io.micrometer.prometheus.PrometheusConfig;
-import io.micrometer.prometheus.PrometheusMeterRegistry;
-import java.util.concurrent.TimeUnit;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import zipkin2.server.internal.MicrometerCollectorMetrics;
-
-@Measurement(iterations = 80, time = 1)
-@Warmup(iterations = 20, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(1)
-public class MetricsBenchmarks {
- static final int LONG_SPAN = 5000;
- static final int MEDIUM_SPAN = 1000;
- static final int SHORT_SPAN = 500;
- private MeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
- private InMemoryCollectorMetrics inMemoryCollectorMetrics = new InMemoryCollectorMetrics();
- private MicrometerCollectorMetrics micrometerCollectorMetrics = new MicrometerCollectorMetrics(registry);
-
- @Benchmark
- public int incrementBytes_longSpans_inMemory() {
- return incrementBytes(inMemoryCollectorMetrics, LONG_SPAN);
- }
-
- @Benchmark
- public int incrementBytes_longSpans_Actuate() {
- return incrementBytes(micrometerCollectorMetrics, LONG_SPAN);
- }
-
- @Benchmark
- public int incrementBytes_mediumSpans_inMemory() {
- return incrementBytes(inMemoryCollectorMetrics, MEDIUM_SPAN);
- }
-
- @Benchmark
- public int incrementBytes_mediumSpans_Actuate() {
- return incrementBytes(micrometerCollectorMetrics, MEDIUM_SPAN);
- }
-
- @Benchmark
- public int incrementBytes_shortSpans_inMemory() {
- return incrementBytes(inMemoryCollectorMetrics, SHORT_SPAN);
- }
-
- @Benchmark
- public int incrementBytes_shortSpans_Actuate() {
- return incrementBytes(micrometerCollectorMetrics, SHORT_SPAN);
- }
-
- private int incrementBytes(CollectorMetrics collectorMetrics, int bytes) {
- collectorMetrics.incrementBytes(bytes);
- return bytes;
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .include(".*" + MetricsBenchmarks.class.getSimpleName() + ".*")
- .threads(40)
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java b/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java
deleted file mode 100644
index 051d046ccc4..00000000000
--- a/benchmarks/src/main/java/zipkin2/elasticsearch/internal/BulkRequestBenchmarks.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.elasticsearch.internal;
-
-import com.linecorp.armeria.common.HttpRequest;
-import com.linecorp.armeria.common.HttpRequestWriter;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.PooledByteBufAllocator;
-import java.util.concurrent.TimeUnit;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.elasticsearch.ElasticsearchStorage;
-import zipkin2.elasticsearch.internal.BulkCallBuilder.IndexEntry;
-import zipkin2.elasticsearch.internal.client.HttpCall;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static zipkin2.elasticsearch.ElasticsearchVersion.V6_0;
-import static zipkin2.storage.cassandra.internal.Resources.resourceToString;
-
-@Measurement(iterations = 5, time = 1)
-@Warmup(iterations = 10, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.SampleTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(2)
-public class BulkRequestBenchmarks {
- static final Span CLIENT_SPAN =
- SpanBytesDecoder.JSON_V2.decodeOne(resourceToString("/zipkin2-client.json").getBytes(UTF_8));
-
- final ElasticsearchStorage es = ElasticsearchStorage.newBuilder(() -> null).build();
- final long indexTimestamp = CLIENT_SPAN.timestampAsLong() / 1000L;
- final String spanIndex =
- es.indexNameFormatter().formatTypeAndTimestampForInsert("span", '-', indexTimestamp);
- final IndexEntry entry =
- BulkCallBuilder.newIndexEntry(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN);
-
- @Benchmark public ByteBuf writeRequest_singleSpan() {
- return BulkCallBuilder.serialize(PooledByteBufAllocator.DEFAULT, entry, true);
- }
-
- @Benchmark public HttpRequest buildAndWriteRequest_singleSpan() {
- BulkCallBuilder builder = new BulkCallBuilder(es, V6_0, "index-span");
- builder.index(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN);
- HttpCall.RequestSupplier supplier = builder.build().request;
- HttpRequestWriter request = HttpRequest.streaming(supplier.headers());
- supplier.writeBody(request::tryWrite);
- return request;
- }
-
- @Benchmark public HttpRequest buildAndWriteRequest_tenSpans() {
- BulkCallBuilder builder = new BulkCallBuilder(es, V6_0, "index-span");
- for (int i = 0; i < 10; i++) {
- builder.index(spanIndex, "span", CLIENT_SPAN, BulkIndexWriter.SPAN);
- }
- HttpCall.RequestSupplier supplier = builder.build().request;
- HttpRequestWriter request = HttpRequest.streaming(supplier.headers());
- supplier.writeBody(request::tryWrite);
- return request;
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .addProfiler("gc")
- .include(".*" + BulkRequestBenchmarks.class.getSimpleName() + ".*")
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java
deleted file mode 100644
index 87c44bdd1e2..00000000000
--- a/benchmarks/src/main/java/zipkin2/internal/DelayLimiterBenchmarks.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.internal;
-
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-
-@Measurement(iterations = 5, time = 1)
-@Warmup(iterations = 10, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.SampleTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(2)
-public class DelayLimiterBenchmarks {
-
- final Random rng = new Random();
- final DelayLimiter limiter = DelayLimiter.newBuilder()
- .ttl(1L, TimeUnit.HOURS) // legacy default from Cassandra
- .cardinality(5 * 4000) // Ex. 5 site tags with cardinality 4000 each
- .build();
-
- @Benchmark public boolean shouldInvoke_randomData() {
- return limiter.shouldInvoke(rng.nextLong());
- }
-
- @Benchmark public boolean shouldInvoke_sameData() {
- return limiter.shouldInvoke(1L);
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .addProfiler("gc")
- .include(".*" + DelayLimiterBenchmarks.class.getSimpleName() + ".*")
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java
deleted file mode 100644
index 7a7ad10b780..00000000000
--- a/benchmarks/src/main/java/zipkin2/internal/ReadBufferBenchmarks.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.internal;
-
-import java.util.concurrent.TimeUnit;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-
-@Measurement(iterations = 5, time = 1)
-@Warmup(iterations = 10, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.SampleTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(1)
-public class ReadBufferBenchmarks {
- byte[] longBuff = {
- (byte) 0x01, (byte) 0x02, (byte) 0x03, (byte) 0x04,
- (byte) 0x05, (byte) 0x06, (byte) 0x07, (byte) 0x08,
- };
-
- @Benchmark public long readLong() {
- int pos = 0;
- return (longBuff[pos] & 0xffL) << 56
- | (longBuff[pos + 1] & 0xffL) << 48
- | (longBuff[pos + 2] & 0xffL) << 40
- | (longBuff[pos + 3] & 0xffL) << 32
- | (longBuff[pos + 4] & 0xffL) << 24
- | (longBuff[pos + 5] & 0xffL) << 16
- | (longBuff[pos + 6] & 0xffL) << 8
- | (longBuff[pos + 7] & 0xffL);
- }
-
- @Benchmark public long readLong_localArray() {
- int pos = 0;
- byte[] longBuff = this.longBuff;
- return (longBuff[pos] & 0xffL) << 56
- | (longBuff[pos + 1] & 0xffL) << 48
- | (longBuff[pos + 2] & 0xffL) << 40
- | (longBuff[pos + 3] & 0xffL) << 32
- | (longBuff[pos + 4] & 0xffL) << 24
- | (longBuff[pos + 5] & 0xffL) << 16
- | (longBuff[pos + 6] & 0xffL) << 8
- | (longBuff[pos + 7] & 0xffL);
- }
-
- @Benchmark public long readLong_8arity_localArray() {
- int pos = 0;
- return readLong(
- longBuff[pos] & 0xff,
- longBuff[pos + 1] & 0xff,
- longBuff[pos + 2] & 0xff,
- longBuff[pos + 3] & 0xff,
- longBuff[pos + 4] & 0xff,
- longBuff[pos + 5] & 0xff,
- longBuff[pos + 6] & 0xff,
- longBuff[pos + 7] & 0xff
- );
- }
-
- @Benchmark public long readLong_8arity() {
- int pos = 0;
- byte[] longBuff = this.longBuff;
- return readLong(
- longBuff[pos] & 0xff,
- longBuff[pos + 1] & 0xff,
- longBuff[pos + 2] & 0xff,
- longBuff[pos + 3] & 0xff,
- longBuff[pos + 4] & 0xff,
- longBuff[pos + 5] & 0xff,
- longBuff[pos + 6] & 0xff,
- longBuff[pos + 7] & 0xff
- );
- }
-
- static long readLong(int p0, int p1, int p2, int p3, int p4, int p5, int p6, int p7) {
- return (p0 & 0xffL) << 56
- | (p1 & 0xffL) << 48
- | (p2 & 0xffL) << 40
- | (p3 & 0xffL) << 32
- | (p4 & 0xffL) << 24
- | (p5 & 0xffL) << 16
- | (p6 & 0xffL) << 8
- | (p7 & 0xffL);
- }
-
- @Benchmark public long readLongReverseBytes() {
- return Long.reverseBytes(readLong());
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .include(".*" + ReadBufferBenchmarks.class.getSimpleName() + ".*")
- .addProfiler("gc")
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java b/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java
deleted file mode 100644
index b6926778e69..00000000000
--- a/benchmarks/src/main/java/zipkin2/internal/WriteBufferBenchmarks.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.internal;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.concurrent.TimeUnit;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-
-@Measurement(iterations = 5, time = 1)
-@Warmup(iterations = 10, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(1)
-public class WriteBufferBenchmarks {
- static final Charset UTF_8 = Charset.forName("UTF-8");
- // Order id = d07c4daa-0fa9-4c03-90b1-e06c4edae250 doesn't exist
- static final String CHINESE_UTF8 = "订单d07c4daa-0fa9-4c03-90b1-e06c4edae250不存在";
- static final int CHINESE_UTF8_SIZE = UTF_8.encode(CHINESE_UTF8).remaining();
- /* length-prefixing a 1 KiB span */
- static final int TEST_INT = 1024;
- /* epoch micros timestamp */
- static final long TEST_LONG = 1472470996199000L;
- byte[] bytes = new byte[8];
- WriteBuffer buffer = WriteBuffer.wrap(bytes);
-
- @Benchmark public int utf8SizeInBytes_chinese() {
- return WriteBuffer.utf8SizeInBytes(CHINESE_UTF8);
- }
-
- @Benchmark public byte[] writeUtf8_chinese() {
- byte[] bytesUtf8 = new byte[CHINESE_UTF8_SIZE];
- WriteBuffer.wrap(bytesUtf8, 0).writeUtf8(CHINESE_UTF8);
- return bytesUtf8;
- }
-
- @Benchmark public ByteBuffer writeUtf8_chinese_jdk() {
- return UTF_8.encode(CHINESE_UTF8);
- }
-
- @Benchmark public int varIntSizeInBytes_32() {
- return WriteBuffer.varintSizeInBytes(TEST_INT);
- }
-
- @Benchmark public int varIntSizeInBytes_64() {
- return WriteBuffer.varintSizeInBytes(TEST_LONG);
- }
-
- @Benchmark public int writeVarint_32() {
- buffer.writeVarint(TEST_INT);
- return buffer.pos();
- }
-
- @Benchmark public int writeVarint_64() {
- buffer.writeVarint(TEST_LONG);
- return buffer.pos();
- }
-
- @Benchmark public int writeLongLe() {
- buffer.writeLongLe(TEST_LONG);
- return buffer.pos();
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .include(".*" + WriteBufferBenchmarks.class.getSimpleName() + ".*")
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java b/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java
deleted file mode 100644
index 5e9ce58bef1..00000000000
--- a/benchmarks/src/main/java/zipkin2/server/internal/throttle/ThrottledCallBenchmarks.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.server.internal.throttle;
-
-import com.linecorp.armeria.common.metric.NoopMeterRegistry;
-import com.netflix.concurrency.limits.limit.FixedLimit;
-import com.netflix.concurrency.limits.limiter.SimpleLimiter;
-import java.io.IOException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-import org.openjdk.jmh.annotations.Threads;
-import org.openjdk.jmh.annotations.Warmup;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import zipkin2.Call;
-import zipkin2.Callback;
-
-@Measurement(iterations = 5, time = 1)
-@Warmup(iterations = 10, time = 1)
-@Fork(3)
-@BenchmarkMode(Mode.SampleTime)
-@OutputTimeUnit(TimeUnit.MICROSECONDS)
-@State(Scope.Thread)
-@Threads(2)
-public class ThrottledCallBenchmarks {
- ExecutorService fakeCallExecutor = Executors.newSingleThreadExecutor();
- ExecutorService executor = Executors.newSingleThreadExecutor();
- ThrottledCall call;
-
- @Setup public void setup() {
- executor = Executors.newSingleThreadExecutor();
- fakeCallExecutor = Executors.newSingleThreadExecutor();
- SimpleLimiter limiter = SimpleLimiter.newBuilder().limit(FixedLimit.of(1)).build();
- LimiterMetrics metrics = new LimiterMetrics(NoopMeterRegistry.get());
- Predicate isOverCapacity = RejectedExecutionException.class::isInstance;
- call =
- new ThrottledCall(new FakeCall(fakeCallExecutor), executor, limiter, metrics, isOverCapacity);
- }
-
- @TearDown public void tearDown() {
- executor.shutdown();
- fakeCallExecutor.shutdown();
- }
-
- @Benchmark public Object execute() throws IOException {
- return call.clone().execute();
- }
-
- @Benchmark public void execute_overCapacity() throws IOException {
- ThrottledCall overCapacity = (ThrottledCall) call.clone();
- ((FakeCall) overCapacity.delegate).overCapacity = true;
-
- try {
- overCapacity.execute();
- } catch (RejectedExecutionException e) {
- assert e == OVER_CAPACITY;
- }
- }
-
- @Benchmark public void execute_throttled() throws IOException {
- call.limiter.acquire(null); // capacity is 1, so this will overdo it.
- call.clone().execute();
- }
-
- static final RejectedExecutionException OVER_CAPACITY = new RejectedExecutionException();
-
- static final class FakeCall extends Call.Base {
- final Executor executor;
- boolean overCapacity = false;
-
- FakeCall(Executor executor) {
- this.executor = executor;
- }
-
- @Override public Void doExecute() throws IOException {
- if (overCapacity) throw OVER_CAPACITY;
- return null;
- }
-
- @Override public void doEnqueue(Callback callback) {
- executor.execute(() -> {
- if (overCapacity) {
- callback.onError(OVER_CAPACITY);
- } else {
- callback.onSuccess(null);
- }
- });
- }
-
- @Override public FakeCall clone() {
- return new FakeCall(executor);
- }
- }
-
- // Convenience main entry-point
- public static void main(String[] args) throws RunnerException {
- Options opt = new OptionsBuilder()
- .addProfiler("gc")
- .include(".*" + ThrottledCallBenchmarks.class.getSimpleName())
- .build();
-
- new Runner(opt).run();
- }
-}
diff --git a/build-bin/maven/maven_build b/build-bin/maven/maven_build
index edb9069fd02..5c28f0966ec 100755
--- a/build-bin/maven/maven_build
+++ b/build-bin/maven/maven_build
@@ -20,5 +20,5 @@ if [ -x ./mvnw ]; then alias mvn=${PWD}/mvnw; fi
(
if [ "${MAVEN_PROJECT_BASEDIR:-.}" != "." ]; then cd ${MAVEN_PROJECT_BASEDIR}; fi
- mvn -T1C -q --batch-mode -DskipTests package "$@"
+ mvn -T1C -q --batch-mode -DskipTests -Dcheckstyle.skip=true package "$@"
)
diff --git a/build-bin/maven/maven_deploy b/build-bin/maven/maven_deploy
index 2dd2ae2a267..1cde935ca36 100755
--- a/build-bin/maven/maven_deploy
+++ b/build-bin/maven/maven_deploy
@@ -20,4 +20,4 @@ export MAVEN_OPTS="$($(dirname "$0")/maven_opts)"
# This script deploys a SNAPSHOT or release version to Sonatype.
#
# Note: In CI, `configure_maven_deploy` must be called before invoking this.
-./mvnw --batch-mode -s ./.settings.xml -Prelease -nsu -DskipTests clean deploy $@
+./mvnw --batch-mode -s ./.settings.xml -Prelease -nsu -DskipTests -Dcheckstyle.skip=true clean deploy $@
diff --git a/build-bin/maven/maven_release b/build-bin/maven/maven_release
index 0ef28e48117..7832ad6f79a 100755
--- a/build-bin/maven/maven_release
+++ b/build-bin/maven/maven_release
@@ -30,6 +30,7 @@ release_branch=${2:-master}
# Checkout master, as we release from master, not a tag ref
git fetch --no-tags --prune --depth=1 origin +refs/heads/${release_branch}:refs/remotes/origin/${release_branch}
git checkout ${release_branch}
+git submodule update --init --recursive
# Ensure no one pushed commits since this release tag as it would fail later commands
commit_local_release_branch=$(git show --pretty='format:%H' ${release_branch})
@@ -40,4 +41,4 @@ if [ "$commit_local_release_branch" != "$commit_remote_release_branch" ]; then
fi
# Prepare and push release commits and the version tag (N.N.N), which triggers deployment.
-./mvnw --batch-mode -nsu -DreleaseVersion=${release_version} -Denforcer.fail=false -Darguments="-DskipTests -Denforcer.fail=false" release:prepare
+./mvnw --batch-mode -nsu -DreleaseVersion=${release_version} -Denforcer.fail=false -Darguments="-DskipTests -Denforcer.fail=false -Dcheckstyle.skip=true" release:prepare
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 75a3813acbc..9cc7ec9a222 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -79,7 +79,7 @@ LABEL org.opencontainers.image.description="Zipkin slim distribution on OpenJDK
COPY --from=install --chown=${USER} /install/zipkin-slim/ /zipkin/
-EXPOSE 9411
+EXPOSE 9411 9412
FROM base-server as zipkin
LABEL org.opencontainers.image.description="Zipkin full distribution on OpenJDK and Alpine Linux"
@@ -90,4 +90,4 @@ ENV MODULE_OPTS=
COPY --from=install --chown=${USER} /install/zipkin/ /zipkin/
# Zipkin's full distribution includes Scribe support (albeit disabled)
-EXPOSE 9410 9411
+EXPOSE 9411 9412
diff --git a/pom.xml b/pom.xml
index 497ea330398..e53edd70b8b 100755
--- a/pom.xml
+++ b/pom.xml
@@ -25,10 +25,6 @@
zipkinzipkin-tests
- zipkin-junit
- zipkin-junit5
- zipkin-storage
- zipkin-collectorzipkin-server
@@ -522,7 +518,7 @@
- [1.8,16)
+ [1.8,18)
diff --git a/skywalking b/skywalking
new file mode 160000
index 00000000000..8e529ee9560
--- /dev/null
+++ b/skywalking
@@ -0,0 +1 @@
+Subproject commit 8e529ee95604fb01a8bd31c272763393f3c70525
diff --git a/zipkin-collector/README.md b/zipkin-collector/README.md
deleted file mode 100644
index 38e824cfc6c..00000000000
--- a/zipkin-collector/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# zipkin-collector
-
-Modules here implement popular transport options available by default in
-the [server build](../zipkin-server).
-
-Please note all modules here require JRE 8+
-
-These libraries are also usable outside the server, for example in
-custom collectors or storage pipelines. While compatibility guarantees
-are strong, choices may be dropped over time.
-
-Collector modules ending in `-v1` are discouraged for new sites as they
-use an older data model. At some point in the future, we will stop
-publishing v1 collector options.
diff --git a/zipkin-collector/activemq/RATIONALE.md b/zipkin-collector/activemq/RATIONALE.md
deleted file mode 100644
index b4c589a6d65..00000000000
--- a/zipkin-collector/activemq/RATIONALE.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Rational for collector-activemq
-
-## Diverse need
-ActiveMQ was formerly requested in April, 2018 through issue #1990 which had two other thumbs-up. An
-early draft of this implementation was developed by @IAMTJW and resulting in another user asking for
-it. In June of 2019 there were a couple more requests for this on gitter, notably about Amazon MQ.
-
-## On ActiveMQ 5.x
-All users who expressed interest were interestd in ActiveMQ 5.x (aka Classic), not Artemis.
-Moreover, at the time of creation Amazon MQ only supported ActiveMQ 5.x.
-
-Artemis has higher throughput potential, but has more conflicting dependencies and would add 8MiB to
-the server. Moreover, no-one has asked for it.
-
-## On part of the default server
-ActiveMQ's client is 2MiB, which will increase the jar size, something that we've been tracking
-recently. To be fair, this is not a large module. In comparison, one dependency of Kafka, `zstd-jni`
-alone is almost 4MiB. There are no dependencies likely to conflict at runtime, and only one dodgy
-dependency, [hawtbuf](https://github.com/fusesource/hawtbuf), on account of it being abandoned since
-2014.
-
-Apart from size, ActiveMQ is a stable integration, included in Spring Boot, and could be useful for
-other integrations as an in-memory queue. Moreover, bundling makes integration with zipkin-aws far
-easier in the same way as bundling elasticsearch does.
-
-## On a potential single-transport client
-
-This package is using the normal activemq-jms client. During a [mail thread](http://activemq.2283324.n4.nabble.com/Interest-in-using-ActiveMQ-as-a-trace-data-transport-for-Zipkin-td4749755.html), we learned the
-the STOMP and AMQP 1.0 protocol are the more portable options for a portable integration as
-ActiveMQ, Artemis and RabbitMQ all support these. On the other hand Kafka does not support these
-protocols. Any future portability work could be limited by this. Meanwhile, using the standard JMS
-client will make troubleshooting most natural to end users.
diff --git a/zipkin-collector/activemq/README.md b/zipkin-collector/activemq/README.md
deleted file mode 100644
index f5e6982eb19..00000000000
--- a/zipkin-collector/activemq/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# collector-activemq
-
-## ActiveMQCollector
-This collector consumes an ActiveMQ 5.x queue for messages that contain a list of spans. Underneath
-this uses the ActiveMQ 5.x JMS client, which has two notable dependencies `slf4j-api` and `hawtbuf`.
-
-The message's binary data includes a list of spans. Supported encodings
-are the same as the http [POST /spans](https://zipkin.io/zipkin-api/#/paths/%252Fspans) body.
-
-### Json
-The message's binary data is a list of spans in json. The first character must be '[' (decimal 91).
-
-`Codec.JSON.writeSpans(spans)` performs the correct json encoding.
-
-Here's an example, sending a list of a single span to the zipkin queue:
-
-```bash
-$ curl -u admin:admin -X POST -s localhost:8161/api/message/zipkin?type=queue \
- -H "Content-Type: application/json" \
- -d '[{"traceId":"1","name":"bang","id":"2","timestamp":1470150004071068,"duration":1,"localEndpoint":{"serviceName":"flintstones"},"tags":{"lc":"bamm-bamm"}}]'
-```
diff --git a/zipkin-collector/activemq/pom.xml b/zipkin-collector/activemq/pom.xml
deleted file mode 100644
index 41ef0468520..00000000000
--- a/zipkin-collector/activemq/pom.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- 2.24.4-SNAPSHOT
-
-
- zipkin-collector-activemq
- Collector: ActiveMQ
- Zipkin span collector for ActiveMQ transport
-
-
- ${project.basedir}/../..
- 5.16.3
-
-
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- org.apache.activemq
- activemq-client
- ${activemq.version}
-
-
-
- org.apache.activemq
- activemq-broker
- ${activemq.version}
- test
-
-
-
- org.apache.activemq.tooling
- activemq-junit
- ${activemq.version}
- test
-
-
-
-
diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java
deleted file mode 100644
index 9eadcde99b3..00000000000
--- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQCollector.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.activemq;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import javax.jms.JMSException;
-import org.apache.activemq.ActiveMQConnectionFactory;
-import zipkin2.CheckResult;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorComponent;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.CollectorSampler;
-import zipkin2.storage.StorageComponent;
-
-/** This collector consumes encoded binary messages from a ActiveMQ queue. */
-public final class ActiveMQCollector extends CollectorComponent {
- public static Builder builder() {
- return new Builder();
- }
-
- /** Configuration including defaults needed to consume spans from a ActiveMQ queue. */
- public static final class Builder extends CollectorComponent.Builder {
- Collector.Builder delegate = Collector.newBuilder(ActiveMQCollector.class);
- CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS;
- ActiveMQConnectionFactory connectionFactory;
- String queue = "zipkin";
- int concurrency = 1;
-
- @Override public Builder storage(StorageComponent storage) {
- this.delegate.storage(storage);
- return this;
- }
-
- @Override public Builder sampler(CollectorSampler sampler) {
- this.delegate.sampler(sampler);
- return this;
- }
-
- @Override public Builder metrics(CollectorMetrics metrics) {
- if (metrics == null) throw new NullPointerException("metrics == null");
- this.metrics = metrics.forTransport("activemq");
- this.delegate.metrics(this.metrics);
- return this;
- }
-
- public Builder connectionFactory(ActiveMQConnectionFactory connectionFactory) {
- if (connectionFactory == null) throw new NullPointerException("connectionFactory == null");
- this.connectionFactory = connectionFactory;
- return this;
- }
-
- /** Queue zipkin spans will be consumed from. Defaults to "zipkin". */
- public Builder queue(String queue) {
- if (queue == null) throw new NullPointerException("queue == null");
- this.queue = queue;
- return this;
- }
-
- /** Count of concurrent message listeners on the queue. Defaults to 1 */
- public Builder concurrency(int concurrency) {
- if (concurrency < 1) throw new IllegalArgumentException("concurrency < 1");
- this.concurrency = concurrency;
- return this;
- }
-
- @Override public ActiveMQCollector build() {
- if (connectionFactory == null) throw new NullPointerException("connectionFactory == null");
- return new ActiveMQCollector(this);
- }
- }
-
- final String queue;
- final LazyInit lazyInit;
-
- ActiveMQCollector(Builder builder) {
- this.queue = builder.queue;
- this.lazyInit = new LazyInit(builder);
- }
-
- @Override public ActiveMQCollector start() {
- lazyInit.init();
- return this;
- }
-
- @Override public CheckResult check() {
- if (lazyInit.result == null) {
- return CheckResult.failed(new IllegalStateException("Collector not yet started"));
- }
- return lazyInit.result.checkResult;
- }
-
- @Override public void close() throws IOException {
- lazyInit.close();
- }
-
- @Override public final String toString() {
- return "ActiveMQCollector{"
- + "brokerURL=" + lazyInit.connectionFactory.getBrokerURL()
- + ", queue=" + lazyInit.queue
- + "}";
- }
-
- static RuntimeException uncheckedException(String prefix, JMSException e) {
- Exception cause = e.getLinkedException();
- if (cause instanceof IOException) {
- return new UncheckedIOException(prefix + message(cause), (IOException) cause);
- }
- return new RuntimeException(prefix + message(e), e);
- }
-
- static String message(Exception cause) {
- return cause.getMessage() != null ? cause.getMessage() : cause.getClass().getSimpleName();
- }
-}
diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java
deleted file mode 100644
index 9cfdb19a41f..00000000000
--- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/ActiveMQSpanConsumer.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.activemq;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import javax.jms.BytesMessage;
-import javax.jms.JMSException;
-import javax.jms.Message;
-import javax.jms.MessageListener;
-import javax.jms.Queue;
-import javax.jms.QueueReceiver;
-import javax.jms.QueueSession;
-import javax.jms.Session;
-import javax.jms.TextMessage;
-import org.apache.activemq.ActiveMQConnection;
-import org.apache.activemq.transport.TransportListener;
-import zipkin2.Callback;
-import zipkin2.CheckResult;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-/**
- * Consumes spans from messages on a ActiveMQ queue. Malformed messages will be discarded. Errors in
- * the storage component will similarly be ignored, with no retry of the message.
- */
-final class ActiveMQSpanConsumer implements TransportListener, MessageListener, Closeable {
- static final Callback NOOP = new Callback() {
- @Override public void onSuccess(Void value) {
- }
-
- @Override public void onError(Throwable t) {
- }
- };
-
- static final CheckResult
- CLOSED = CheckResult.failed(new IllegalStateException("Collector intentionally closed")),
- INTERRUPTION = CheckResult.failed(new IOException("Recoverable error on ActiveMQ connection"));
-
- final Collector collector;
- final CollectorMetrics metrics;
-
- final ActiveMQConnection connection;
- final Map sessionToReceiver = new LinkedHashMap<>();
-
- volatile CheckResult checkResult = CheckResult.OK;
-
- ActiveMQSpanConsumer(Collector collector, CollectorMetrics metrics, ActiveMQConnection conn) {
- this.collector = collector;
- this.metrics = metrics;
- this.connection = conn;
- connection.addTransportListener(this);
- }
-
- /** JMS contract is one session per thread: we need a new session up to our concurrency level. */
- void registerInNewSession(ActiveMQConnection connection, String queue) throws JMSException {
- // Pass redundant info as we can't use default method in activeMQ
- QueueSession session = connection.createQueueSession(false, Session.AUTO_ACKNOWLEDGE);
- // No need to do anything on ActiveMQ side as physical queues are created on demand
- Queue destination = session.createQueue(queue);
- QueueReceiver receiver = session.createReceiver(destination);
- receiver.setMessageListener(this);
- sessionToReceiver.put(session, receiver);
- }
-
- @Override public void onCommand(Object o) {
- }
-
- @Override public void onException(IOException error) {
- checkResult = CheckResult.failed(error);
- }
-
- @Override public void transportInterupted() {
- checkResult = INTERRUPTION;
- }
-
- @Override public void transportResumed() {
- checkResult = CheckResult.OK;
- }
-
- @Override public void onMessage(Message message) {
- metrics.incrementMessages();
- byte[] serialized; // TODO: consider how to reuse buffers here
- try {
- if (message instanceof BytesMessage) {
- BytesMessage bytesMessage = (BytesMessage) message;
- serialized = new byte[(int) bytesMessage.getBodyLength()];
- bytesMessage.readBytes(serialized);
- } else if (message instanceof TextMessage) {
- String text = ((TextMessage) message).getText();
- serialized = text.getBytes(UTF_8);
- } else {
- metrics.incrementMessagesDropped();
- return;
- }
- } catch (Exception e) {
- metrics.incrementMessagesDropped();
- return;
- }
-
- metrics.incrementBytes(serialized.length);
- if (serialized.length == 0) return; // lenient on empty messages
- collector.acceptSpans(serialized, NOOP);
- }
-
- @Override public void close() {
- if (checkResult == CLOSED) return;
- checkResult = CLOSED;
- connection.removeTransportListener(this);
- try {
- for (Map.Entry sessionReceiver : sessionToReceiver.entrySet()) {
- sessionReceiver.getValue().setMessageListener(null); // deregister this
- sessionReceiver.getKey().close();
- }
- connection.close();
- } catch (JMSException ignored) {
- // EmptyCatch ignored
- }
- }
-}
diff --git a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java b/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java
deleted file mode 100644
index 2e518ad6009..00000000000
--- a/zipkin-collector/activemq/src/main/java/zipkin2/collector/activemq/LazyInit.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.activemq;
-
-import javax.jms.JMSException;
-import org.apache.activemq.ActiveMQConnection;
-import org.apache.activemq.ActiveMQConnectionFactory;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-
-import static zipkin2.collector.activemq.ActiveMQCollector.uncheckedException;
-
-/**
- * Lazy creates a connection and registers a message listener up to the specified concurrency level.
- * This listener will also receive health notifications.
- */
-final class LazyInit {
- final Collector collector;
- final CollectorMetrics metrics;
- final ActiveMQConnectionFactory connectionFactory;
- final String queue;
- final int concurrency;
-
- volatile ActiveMQSpanConsumer result;
-
- LazyInit(ActiveMQCollector.Builder builder) {
- collector = builder.delegate.build();
- metrics = builder.metrics;
- connectionFactory = builder.connectionFactory;
- queue = builder.queue;
- concurrency = builder.concurrency;
- }
-
- ActiveMQSpanConsumer init() {
- if (result == null) {
- synchronized (this) {
- if (result == null) {
- result = doInit();
- }
- }
- }
- return result;
- }
-
- void close() {
- ActiveMQSpanConsumer maybe = result;
- if (maybe != null) result.close();
- }
-
- ActiveMQSpanConsumer doInit() {
- final ActiveMQConnection connection;
- try {
- connection = (ActiveMQConnection) connectionFactory.createQueueConnection();
- connection.start();
- } catch (JMSException e) {
- throw uncheckedException("Unable to establish connection to ActiveMQ broker: ", e);
- }
-
- try {
- ActiveMQSpanConsumer result = new ActiveMQSpanConsumer(collector, metrics, connection);
-
- for (int i = 0; i < concurrency; i++) {
- result.registerInNewSession(connection, queue);
- }
-
- return result;
- } catch (JMSException e) {
- try {
- connection.close();
- } catch (JMSException ignored) {
- // EmptyCatch ignored
- }
- throw uncheckedException("Unable to create queueReceiver(" + queue + "): ", e);
- }
- }
-}
diff --git a/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java b/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java
deleted file mode 100644
index 72949e4ca78..00000000000
--- a/zipkin-collector/activemq/src/test/java/zipkin2/collector/activemq/ITActiveMQCollector.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.activemq;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.activemq.ActiveMQConnectionFactory;
-import org.apache.activemq.junit.EmbeddedActiveMQBroker;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TestName;
-import zipkin2.Call;
-import zipkin2.Callback;
-import zipkin2.Component;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.storage.ForwardingStorageComponent;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-import static zipkin2.TestObjects.UTF_8;
-import static zipkin2.codec.SpanBytesEncoder.PROTO3;
-import static zipkin2.codec.SpanBytesEncoder.THRIFT;
-
-public class ITActiveMQCollector {
- List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]);
-
- @ClassRule public static EmbeddedActiveMQBroker activemq = new EmbeddedActiveMQBroker();
- @Rule public TestName testName = new TestName();
- @Rule public ExpectedException thrown = ExpectedException.none();
-
- InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics();
- InMemoryCollectorMetrics activemqMetrics = metrics.forTransport("activemq");
-
- CopyOnWriteArraySet threadsProvidingSpans = new CopyOnWriteArraySet<>();
- LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>();
- SpanConsumer consumer = (spans) -> {
- threadsProvidingSpans.add(Thread.currentThread());
- receivedSpans.add(spans);
- return Call.create(null);
- };
-
- ActiveMQCollector collector;
-
- @Before public void start() {
- collector = builder().build().start();
- }
-
- @After public void stop() throws IOException {
- collector.close();
- }
-
- @Test public void checkPasses() {
- assertThat(collector.check().ok()).isTrue();
- }
-
- @Test public void startFailsWithInvalidActiveMqServer() throws Exception {
- collector.close();
-
- ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory();
- // we can be pretty certain ActiveMQ isn't running on localhost port 80
- connectionFactory.setBrokerURL("tcp://localhost:80");
- collector = builder().connectionFactory(connectionFactory).build();
-
- thrown.expect(UncheckedIOException.class);
- thrown.expectMessage("Unable to establish connection to ActiveMQ broker: Connection refused");
- collector.start();
- }
-
- /**
- * The {@code toString()} of {@link Component} implementations appear in health check endpoints.
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken
- * to ensure {@code toString()} output is a reasonable length and does not contain sensitive
- * information.
- */
- @Test public void toStringContainsOnlySummaryInformation() {
- assertThat(collector).hasToString(String.format("ActiveMQCollector{brokerURL=%s, queue=%s}",
- activemq.getVmURL(), testName.getMethodName())
- );
- }
-
- /** Ensures list encoding works: a json encoded list of spans */
- @Test public void messageWithMultipleSpans_json() throws Exception {
- messageWithMultipleSpans(SpanBytesEncoder.JSON_V1);
- }
-
- /** Ensures list encoding works: a version 2 json list of spans */
- @Test public void messageWithMultipleSpans_json2() throws Exception {
- messageWithMultipleSpans(SpanBytesEncoder.JSON_V2);
- }
-
- /** Ensures list encoding works: proto3 ListOfSpans */
- @Test public void messageWithMultipleSpans_proto3() throws Exception {
- messageWithMultipleSpans(SpanBytesEncoder.PROTO3);
- }
-
- void messageWithMultipleSpans(SpanBytesEncoder encoder) throws Exception {
- byte[] message = encoder.encodeList(spans);
- activemq.pushMessage(collector.queue, message);
-
- assertThat(receivedSpans.take()).isEqualTo(spans);
-
- assertThat(activemqMetrics.messages()).isEqualTo(1);
- assertThat(activemqMetrics.messagesDropped()).isZero();
- assertThat(activemqMetrics.bytes()).isEqualTo(message.length);
- assertThat(activemqMetrics.spans()).isEqualTo(spans.size());
- assertThat(activemqMetrics.spansDropped()).isZero();
- }
-
- /** Ensures malformed spans don't hang the collector */
- @Test public void skipsMalformedData() throws Exception {
- byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json
- byte[] malformed2 = "malformed".getBytes(UTF_8);
- activemq.pushMessage(collector.queue, THRIFT.encodeList(spans));
- activemq.pushMessage(collector.queue, new byte[0]);
- activemq.pushMessage(collector.queue, malformed1);
- activemq.pushMessage(collector.queue, malformed2);
- activemq.pushMessage(collector.queue, THRIFT.encodeList(spans));
-
- Thread.sleep(1000);
-
- assertThat(activemqMetrics.messages()).isEqualTo(5);
- assertThat(activemqMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty
- assertThat(activemqMetrics.bytes())
- .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length);
- assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 2);
- assertThat(activemqMetrics.spansDropped()).isZero();
- }
-
- /** Guards against errors that leak from storage, such as InvalidQueryException */
- @Test public void skipsOnSpanStorageException() throws Exception {
- collector.close();
-
- AtomicInteger counter = new AtomicInteger();
- consumer = (input) -> new Call.Base() {
- @Override protected Void doExecute() {
- throw new AssertionError();
- }
-
- @Override protected void doEnqueue(Callback callback) {
- if (counter.getAndIncrement() == 1) {
- callback.onError(new RuntimeException("storage fell over"));
- } else {
- receivedSpans.add(spans);
- callback.onSuccess(null);
- }
- }
-
- @Override public Call clone() {
- throw new AssertionError();
- }
- };
-
- activemq.pushMessage(collector.queue, PROTO3.encodeList(spans));
- activemq.pushMessage(collector.queue, PROTO3.encodeList(spans)); // tossed on error
- activemq.pushMessage(collector.queue, PROTO3.encodeList(spans));
-
- collector = builder().storage(buildStorage(consumer)).build().start();
-
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- // the only way we could read this, is if the malformed span was skipped.
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
-
- assertThat(activemqMetrics.messages()).isEqualTo(3);
- assertThat(activemqMetrics.messagesDropped()).isZero(); // storage failure not message failure
- assertThat(activemqMetrics.bytes()).isEqualTo(PROTO3.encodeList(spans).length * 3);
- assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 3);
- assertThat(activemqMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped
- }
-
- @Test public void messagesDistributedAcrossMultipleThreadsSuccessfully() throws Exception {
- collector.close();
-
- CountDownLatch latch = new CountDownLatch(2);
- collector = builder().concurrency(2).storage(buildStorage((spans) -> {
- latch.countDown();
- try {
- latch.await(); // await the other one as this proves 2 threads are in use
- } catch (InterruptedException e) {
- throw new AssertionError(e);
- }
- return consumer.accept(spans);
- })).build().start();
-
- activemq.pushMessage(collector.queue, ""); // empty bodies don't go to storage
- activemq.pushMessage(collector.queue, PROTO3.encodeList(spans));
- activemq.pushMessage(collector.queue, PROTO3.encodeList(spans));
-
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- latch.countDown();
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
-
- assertThat(threadsProvidingSpans.size()).isEqualTo(2);
-
- assertThat(activemqMetrics.messages()).isEqualTo(3); // 2 + empty body for warmup
- assertThat(activemqMetrics.messagesDropped()).isZero();
- assertThat(activemqMetrics.bytes()).isEqualTo(PROTO3.encodeList(spans).length * 2);
- assertThat(activemqMetrics.spans()).isEqualTo(spans.size() * 2);
- assertThat(activemqMetrics.spansDropped()).isZero();
- }
-
- ActiveMQCollector.Builder builder() {
- return ActiveMQCollector.builder()
- .connectionFactory(activemq.createConnectionFactory())
- .storage(buildStorage(consumer))
- .metrics(metrics)
- // prevent test flakes by having each run in an individual queue
- .queue(testName.getMethodName());
- }
-
- static StorageComponent buildStorage(final SpanConsumer spanConsumer) {
- return new ForwardingStorageComponent() {
- @Override protected StorageComponent delegate() {
- throw new AssertionError();
- }
-
- @Override public SpanConsumer spanConsumer() {
- return spanConsumer;
- }
- };
- }
-}
diff --git a/zipkin-collector/activemq/src/test/resources/simplelogger.properties b/zipkin-collector/activemq/src/test/resources/simplelogger.properties
deleted file mode 100644
index 41089aca77e..00000000000
--- a/zipkin-collector/activemq/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,8 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
-
-org.slf4j.simpleLogger.log.zipkin2.collector.activemq=debug
diff --git a/zipkin-collector/core/pom.xml b/zipkin-collector/core/pom.xml
deleted file mode 100644
index eff1b899b15..00000000000
--- a/zipkin-collector/core/pom.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- 2.24.4-SNAPSHOT
-
-
- zipkin-collector
- Collector: Core Library
-
-
- ${project.basedir}/../..
-
-
-
-
- org.slf4j
- slf4j-api
- ${slf4j.version}
-
-
-
- uk.org.lidalia
- slf4j-test
- 1.2.0
- test
-
-
-
diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java b/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java
deleted file mode 100644
index 8f07c5e4ef3..00000000000
--- a/zipkin-collector/core/src/main/java/zipkin2/collector/Collector.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.Executor;
-import java.util.function.Supplier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import zipkin2.Callback;
-import zipkin2.Span;
-import zipkin2.SpanBytesDecoderDetector;
-import zipkin2.codec.BytesDecoder;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.storage.StorageComponent;
-
-import static java.lang.String.format;
-import static java.util.logging.Level.FINE;
-import static zipkin2.Call.propagateIfFatal;
-
-/**
- * This component takes action on spans received from a transport. This includes deserializing,
- * sampling and scheduling for storage.
- *
- *
Callbacks passed do not propagate to the storage layer. They only return success or failures
- * before storage is attempted. This ensures that calling threads are disconnected from storage
- * threads.
- */
-public class Collector { // not final for mock
- static final Callback NOOP_CALLBACK = new Callback() {
- @Override public void onSuccess(Void value) {
- }
-
- @Override public void onError(Throwable t) {
- }
- };
-
- /** Needed to scope this to the correct logging category */
- public static Builder newBuilder(Class> loggingClass) {
- if (loggingClass == null) throw new NullPointerException("loggingClass == null");
- return new Builder(LoggerFactory.getLogger(loggingClass.getName()));
- }
-
- public static final class Builder {
- final Logger logger;
- StorageComponent storage;
- CollectorSampler sampler;
- CollectorMetrics metrics;
-
- Builder(Logger logger) {
- this.logger = logger;
- }
-
- /** Sets {@link {@link CollectorComponent.Builder#storage(StorageComponent)}} */
- public Builder storage(StorageComponent storage) {
- if (storage == null) throw new NullPointerException("storage == null");
- this.storage = storage;
- return this;
- }
-
- /** Sets {@link {@link CollectorComponent.Builder#metrics(CollectorMetrics)}} */
- public Builder metrics(CollectorMetrics metrics) {
- if (metrics == null) throw new NullPointerException("metrics == null");
- this.metrics = metrics;
- return this;
- }
-
- /** Sets {@link {@link CollectorComponent.Builder#sampler(CollectorSampler)}} */
- public Builder sampler(CollectorSampler sampler) {
- if (sampler == null) throw new NullPointerException("sampler == null");
- this.sampler = sampler;
- return this;
- }
-
- public Collector build() {
- return new Collector(this);
- }
- }
-
- final Logger logger;
- final CollectorMetrics metrics;
- final CollectorSampler sampler;
- final StorageComponent storage;
-
- Collector(Builder builder) {
- if (builder.logger == null) throw new NullPointerException("logger == null");
- this.logger = builder.logger;
- this.metrics = builder.metrics == null ? CollectorMetrics.NOOP_METRICS : builder.metrics;
- if (builder.storage == null) throw new NullPointerException("storage == null");
- this.storage = builder.storage;
- this.sampler = builder.sampler == null ? CollectorSampler.ALWAYS_SAMPLE : builder.sampler;
- }
-
- public void accept(List spans, Callback callback) {
- accept(spans, callback, Runnable::run);
- }
-
- /**
- * Calls to get the storage component could be blocking. This ensures requests that block
- * callers (such as http or gRPC) do not add additional load during such events.
- *
- * @param executor the executor used to enqueue the storage request.
- */
- public void accept(List spans, Callback callback, Executor executor) {
- if (spans.isEmpty()) {
- callback.onSuccess(null);
- return;
- }
- metrics.incrementSpans(spans.size());
-
- List sampledSpans = sample(spans);
- if (sampledSpans.isEmpty()) {
- callback.onSuccess(null);
- return;
- }
-
- // In order to ensure callers are not blocked, we swap callbacks when we get to the storage
- // phase of this process. Here, we create a callback whose sole purpose is classifying later
- // errors on this bundle of spans in the same log category. This allows people to only turn on
- // debug logging in one place.
- try {
- executor.execute(new StoreSpans(sampledSpans));
- callback.onSuccess(null);
- } catch (Throwable unexpected) { // ensure if a future is supplied we always set value or error
- callback.onError(unexpected);
- throw unexpected;
- }
- }
-
- /** Like {@link #acceptSpans(byte[], BytesDecoder, Callback)}, except using a byte buffer. */
- public void acceptSpans(ByteBuffer encoded, SpanBytesDecoder decoder, Callback callback,
- Executor executor) {
- List spans;
- try {
- spans = decoder.decodeList(encoded);
- } catch (RuntimeException | Error e) {
- handleDecodeError(e, callback);
- return;
- }
- accept(spans, callback, executor);
- }
-
- /**
- * Before calling this, call {@link CollectorMetrics#incrementMessages()}, and {@link
- * CollectorMetrics#incrementBytes(int)}. Do not call any other metrics callbacks as those are
- * handled internal to this method.
- *
- * @param serialized not empty message
- */
- public void acceptSpans(byte[] serialized, Callback callback) {
- BytesDecoder decoder;
- try {
- decoder = SpanBytesDecoderDetector.decoderForListMessage(serialized);
- } catch (RuntimeException | Error e) {
- handleDecodeError(e, callback);
- return;
- }
- acceptSpans(serialized, decoder, callback);
- }
-
- /**
- * Before calling this, call {@link CollectorMetrics#incrementMessages()}, and {@link
- * CollectorMetrics#incrementBytes(int)}. Do not call any other metrics callbacks as those are
- * handled internal to this method.
- *
- * @param serializedSpans not empty message
- */
- public void acceptSpans(
- byte[] serializedSpans, BytesDecoder decoder, Callback callback) {
- List spans;
- try {
- spans = decodeList(decoder, serializedSpans);
- } catch (RuntimeException | Error e) {
- handleDecodeError(e, callback);
- return;
- }
- accept(spans, callback);
- }
-
- List decodeList(BytesDecoder decoder, byte[] serialized) {
- List out = new ArrayList<>();
- decoder.decodeList(serialized, out);
- return out;
- }
-
- void store(List sampledSpans, Callback callback) {
- storage.spanConsumer().accept(sampledSpans).enqueue(callback);
- }
-
- String idString(Span span) {
- return span.traceId() + "/" + span.id();
- }
-
- List sample(List input) {
- List sampled = new ArrayList<>(input.size());
- for (int i = 0, length = input.size(); i < length; i++) {
- Span s = input.get(i);
- if (sampler.isSampled(s.traceId(), Boolean.TRUE.equals(s.debug()))) {
- sampled.add(s);
- }
- }
- int dropped = input.size() - sampled.size();
- if (dropped > 0) metrics.incrementSpansDropped(dropped);
- return sampled;
- }
-
- class StoreSpans implements Callback, Runnable {
- final List spans;
-
- StoreSpans(List spans) {
- this.spans = spans;
- }
-
- @Override public void run() {
- try {
- store(spans, this);
- } catch (RuntimeException | Error e) {
- // While unexpected, invoking the storage command could raise an error synchronously. When
- // that's the case, we wouldn't have invoked callback.onSuccess, so we need to handle the
- // error here.
- onError(e);
- }
- }
-
- @Override public void onSuccess(Void value) {
- }
-
- @Override public void onError(Throwable t) {
- handleStorageError(spans, t, NOOP_CALLBACK);
- }
-
- @Override public String toString() {
- return appendSpanIds(spans, new StringBuilder("StoreSpans(")) + ")";
- }
- }
-
- void handleDecodeError(Throwable e, Callback callback) {
- metrics.incrementMessagesDropped();
- handleError(e, "Cannot decode spans"::toString, callback);
- }
-
- /**
- * When storing spans, an exception can be raised before or after the fact. This adds context of
- * span ids to give logs more relevance.
- */
- void handleStorageError(List spans, Throwable e, Callback callback) {
- metrics.incrementSpansDropped(spans.size());
- // The exception could be related to a span being huge. Instead of filling logs,
- // print trace id, span id pairs
- handleError(e, () -> appendSpanIds(spans, new StringBuilder("Cannot store spans ")), callback);
- }
-
- void handleError(Throwable e, Supplier defaultLogMessage, Callback callback) {
- propagateIfFatal(e);
- callback.onError(e);
- if (!logger.isDebugEnabled()) return;
-
- String error = e.getMessage() != null ? e.getMessage() : "";
- // We have specific code that customizes log messages. Use this when the case.
- if (error.startsWith("Malformed") || error.startsWith("Truncated")) {
- logger.debug(error, e);
- } else { // otherwise, beautify the message
- String message =
- format("%s due to %s(%s)", defaultLogMessage.get(), e.getClass().getSimpleName(), error);
- logger.debug(message, e);
- }
- }
-
- // TODO: this logic needs to be redone as service names are more important than span IDs. Also,
- // span IDs repeat between client and server!
- String appendSpanIds(List spans, StringBuilder message) {
- message.append("[");
- int i = 0;
- Iterator iterator = spans.iterator();
- while (iterator.hasNext() && i++ < 3) {
- message.append(idString(iterator.next()));
- if (iterator.hasNext()) message.append(", ");
- }
- if (iterator.hasNext()) message.append("...");
-
- return message.append("]").toString();
- }
-}
diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java
deleted file mode 100644
index 99d800017a7..00000000000
--- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorComponent.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.util.List;
-import zipkin2.Component;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-/**
- * The collector represents the server-side of a transport. Its job is to take spans from a
- * transport and store ones it has sampled.
- *
- *
Call {@link #start()} to start collecting spans.
- */
-public abstract class CollectorComponent extends Component {
-
- /**
- * Starts the server-side of the transport, typically listening or looking up a queue.
- *
- *
Many implementations block the calling thread until services are available.
- */
- public abstract CollectorComponent start();
-
- public abstract static class Builder {
- /**
- * Once spans are sampled, they are {@link SpanConsumer#accept(List)} queued for storage} using
- * this component.
- */
- public abstract Builder storage(StorageComponent storage);
-
- /**
- * Aggregates and reports collection metrics to a monitoring system. Should be {@link
- * CollectorMetrics#forTransport(String) scoped to this transport}. Defaults to no-op.
- */
- public abstract Builder metrics(CollectorMetrics metrics);
-
- /**
- * {@link CollectorSampler#isSampled(String, boolean) samples spans} to reduce load on the
- * storage system. Defaults to always sample.
- */
- public abstract Builder sampler(CollectorSampler sampler);
-
- public abstract CollectorComponent build();
- }
-}
diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java
deleted file mode 100644
index e55213c9d63..00000000000
--- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorMetrics.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.util.Collection;
-import java.util.List;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.storage.SpanConsumer;
-
-/**
- * Instrumented applications report spans over a transport such as Kafka. Zipkin collectors receive
- * these messages, {@link SpanBytesDecoder#decode(byte[], Collection) decoding them into spans},
- * {@link CollectorSampler#isSampled(String, boolean) apply sampling}, and {@link
- * SpanConsumer#accept(List) queues them for storage}.
- *
- *
Callbacks on this type are invoked by zipkin collectors to improve the visibility of the
- * system. A typical implementation will report metrics to a telemetry system for analysis and
- * reporting.
- *
- *
Spans Collected vs Queryable Spans
- *
- *
A span queried may be comprised of multiple spans collected. While instrumentation should
- * report complete spans, Instrumentation often patch the same span twice, ex adding annotations.
- * Also, RPC spans include at least 2 messages due to the client and the server reporting
- * separately. Finally, some storage components merge patches at ingest. For these reasons, you
- * should be cautious to alert on queryable spans vs stored spans, unless you control the
- * instrumentation in such a way that queryable spans/message is reliable.
- *
- *
Key Relationships
- *
- *
The following relationships can be used to consider health of the tracing system.
- *
- *
- *
- *
Successful Messages = {@link #incrementMessages() Accepted messages} -
- * {@link #incrementMessagesDropped() Dropped messages}. Alert when this is less than amount of
- * messages sent from instrumentation.
- *
Stored spans <= {@link #incrementSpans(int) Accepted spans} - {@link
- * #incrementSpansDropped(int) Dropped spans}. Alert when this drops below the
- * {@link CollectorSampler#isSampled(long, boolean) collection-tier sample rate}.
- *
- *
- *
- */
-public interface CollectorMetrics {
-
- /**
- * Those who wish to partition metrics by transport can call this method to include the transport
- * type in the backend metric key.
- *
- *
For example, an implementation may by default report {@link #incrementSpans(int) incremented
- * spans} to the key "zipkin.collector.span.accepted". When {@code metrics.forTransport("kafka"}
- * is called, the counter would report to "zipkin.collector.kafka.span.accepted"
- *
- * @param transportType ex "http", "rabbitmq", "kafka"
- */
- CollectorMetrics forTransport(String transportType);
-
- /**
- * Increments count of messages received, which contain 0 or more spans. Ex POST requests or Kafka
- * messages consumed.
- */
- void incrementMessages();
-
- /**
- * Increments count of messages that could not be read. Ex malformed content, or peer disconnect.
- */
- void incrementMessagesDropped();
-
- /**
- * Increments the count of spans read from a successful message. When bundling is used, accepted
- * spans will be a larger number than successful messages.
- */
- void incrementSpans(int quantity);
-
- /**
- * Increments the number of bytes containing serialized spans in a message.
- *
- *
Note: this count should relate to the raw data structures, like json or thrift, and discount
- * compression, enveloping, etc.
- */
- void incrementBytes(int quantity);
-
- /**
- * Increments the count of spans dropped for any reason. For example, failure queueing to storage
- * or sampling decisions.
- */
- void incrementSpansDropped(int quantity);
-
- CollectorMetrics NOOP_METRICS =
- new CollectorMetrics() {
-
- @Override
- public CollectorMetrics forTransport(String transportType) {
- return this;
- }
-
- @Override
- public void incrementMessages() {}
-
- @Override
- public void incrementMessagesDropped() {}
-
- @Override
- public void incrementSpans(int quantity) {}
-
- @Override
- public void incrementBytes(int quantity) {}
-
- @Override
- public void incrementSpansDropped(int quantity) {}
-
- @Override
- public String toString() {
- return "NoOpCollectorMetrics";
- }
- };
-}
diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java b/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java
deleted file mode 100644
index 152638d0401..00000000000
--- a/zipkin-collector/core/src/main/java/zipkin2/collector/CollectorSampler.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import zipkin2.Span;
-import zipkin2.internal.HexCodec;
-
-/**
- * CollectorSampler decides if a particular trace should be "sampled", i.e. recorded in permanent
- * storage. This involves a consistent decision based on the span's trace ID with one notable
- * exception: {@link Span#debug() Debug} spans are always stored.
- *
- *
Implementation
- *
- *
Accepts a percentage of trace ids by comparing their absolute value against a potentially
- * dynamic boundary. eg {@code isSampled == abs(traceId) <= boundary}
- *
- *
While idempotent, this implementation's sample rate won't exactly match the input rate because
- * trace ids are not perfectly distributed across 64bits. For example, tests have shown an error
- * rate of 3% when 100K trace ids are {@link java.util.Random#nextLong random}.
- */
-public abstract class CollectorSampler {
- public static final CollectorSampler ALWAYS_SAMPLE = CollectorSampler.create(1.0f);
-
- /**
- * Returns a trace ID sampler with the indicated rate.
- *
- * @param rate minimum sample rate is 0.0001, or 0.01% of traces
- */
- public static CollectorSampler create(float rate) {
- if (rate < 0 || rate > 1)
- throw new IllegalArgumentException("rate should be between 0 and 1: was " + rate);
- final long boundary = (long) (Long.MAX_VALUE * rate); // safe cast as less <= 1
- return new CollectorSampler() {
- @Override
- protected long boundary() {
- return boundary;
- }
- };
- }
-
- protected abstract long boundary();
-
- /**
- * Returns true if spans with this trace ID should be recorded to storage.
- *
- *
Zipkin v1 allows storage-layer sampling, which can help prevent spikes in traffic from
- * overloading the system. Debug spans are always stored.
- *
- *
This uses only the lower 64 bits of the trace ID as instrumentation still send mixed trace
- * ID width.
- *
- * @param hexTraceId the lower 64 bits of the span's trace ID are checked against the boundary
- * @param debug when true, always passes sampling
- */
- public boolean isSampled(String hexTraceId, boolean debug) {
- if (Boolean.TRUE.equals(debug)) return true;
- long traceId = HexCodec.lowerHexToUnsignedLong(hexTraceId);
- // The absolute value of Long.MIN_VALUE is larger than a long, so Math.abs returns identity.
- // This converts to MAX_VALUE to avoid always dropping when traceId == Long.MIN_VALUE
- long t = traceId == Long.MIN_VALUE ? Long.MAX_VALUE : Math.abs(traceId);
- return t <= boundary();
- }
-
- @Override
- public String toString() {
- return "CollectorSampler(" + boundary() + ")";
- }
-
- protected CollectorSampler() {}
-}
diff --git a/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java b/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java
deleted file mode 100644
index 7555556ba9f..00000000000
--- a/zipkin-collector/core/src/main/java/zipkin2/collector/InMemoryCollectorMetrics.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-public final class InMemoryCollectorMetrics implements CollectorMetrics {
-
- private final ConcurrentHashMap metrics;
- private final String messages;
- private final String messagesDropped;
- private final String bytes;
- private final String spans;
- private final String spansDropped;
-
- public InMemoryCollectorMetrics() {
- this(new ConcurrentHashMap<>(), null);
- }
-
- InMemoryCollectorMetrics(ConcurrentHashMap metrics, String transport) {
- this.metrics = metrics;
- this.messages = scope("messages", transport);
- this.messagesDropped = scope("messagesDropped", transport);
- this.bytes = scope("bytes", transport);
- this.spans = scope("spans", transport);
- this.spansDropped = scope("spansDropped", transport);
- }
-
- @Override
- public InMemoryCollectorMetrics forTransport(String transportType) {
- if (transportType == null) throw new NullPointerException("transportType == null");
- return new InMemoryCollectorMetrics(metrics, transportType);
- }
-
- @Override
- public void incrementMessages() {
- increment(messages, 1);
- }
-
- public int messages() {
- return get(messages);
- }
-
- @Override
- public void incrementMessagesDropped() {
- increment(messagesDropped, 1);
- }
-
- public int messagesDropped() {
- return get(messagesDropped);
- }
-
- @Override
- public void incrementBytes(int quantity) {
- increment(bytes, quantity);
- }
-
- public int bytes() {
- return get(bytes);
- }
-
- @Override
- public void incrementSpans(int quantity) {
- increment(spans, quantity);
- }
-
- public int spans() {
- return get(spans);
- }
-
- @Override
- public void incrementSpansDropped(int quantity) {
- increment(spansDropped, quantity);
- }
-
- public int spansDropped() {
- return get(spansDropped);
- }
-
- public void clear() {
- metrics.clear();
- }
-
- private int get(String key) {
- AtomicInteger atomic = metrics.get(key);
- return atomic == null ? 0 : atomic.get();
- }
-
- private void increment(String key, int quantity) {
- if (quantity == 0) return;
- while (true) {
- AtomicInteger metric = metrics.get(key);
- if (metric == null) {
- metric = metrics.putIfAbsent(key, new AtomicInteger(quantity));
- if (metric == null) return; // won race creating the entry
- }
-
- while (true) {
- int oldValue = metric.get();
- int update = oldValue + quantity;
- if (metric.compareAndSet(oldValue, update)) return; // won race updating
- }
- }
- }
-
- static String scope(String key, String transport) {
- return key + (transport == null ? "" : "." + transport);
- }
-}
diff --git a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java b/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java
deleted file mode 100644
index ebbf04ae224..00000000000
--- a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorSamplerTest.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.util.stream.Stream;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import zipkin2.Span;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.data.Percentage.withPercentage;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-
-public class CollectorSamplerTest {
-
- @Rule public ExpectedException thrown = ExpectedException.none();
-
- /**
- * Math.abs("8000000000000000") returns a negative, we coerse to "7fffffffffffffff" to avoid
- * always dropping when trace_id == "8000000000000000"
- */
- @Test
- public void mostNegativeNumberDefence() {
- CollectorSampler sampler = CollectorSampler.create(0.1f);
-
- assertThat(sampler.isSampled("8000000000000000", false))
- .isEqualTo(sampler.isSampled("7fffffffffffffff", false));
- }
-
- @Test
- public void debugWins() {
- CollectorSampler sampler = CollectorSampler.create(0.0f);
-
- assertThat(sampler.isSampled("8000000000000000", true)).isTrue();
- }
-
- @Test
- public void retain10Percent() {
- float sampleRate = 0.1f;
- CollectorSampler sampler = CollectorSampler.create(sampleRate);
-
- assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false)).count())
- .isCloseTo((long) (LOTS_OF_SPANS.length * sampleRate), withPercentage(3));
- }
-
- /** The collector needs to apply the same decision to incremental updates in a trace. */
- @Test
- public void idempotent() {
- CollectorSampler sampler1 = CollectorSampler.create(0.1f);
- CollectorSampler sampler2 = CollectorSampler.create(0.1f);
-
- assertThat(lotsOfSpans().filter(s -> sampler1.isSampled(s.traceId(), false)).toArray())
- .containsExactly(
- lotsOfSpans().filter(s -> sampler2.isSampled(s.traceId(), false)).toArray());
- }
-
- @Test
- public void zeroMeansDropAllTraces() {
- CollectorSampler sampler = CollectorSampler.create(0.0f);
-
- assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false))).isEmpty();
- }
-
- @Test
- public void oneMeansKeepAllTraces() {
- CollectorSampler sampler = CollectorSampler.create(1.0f);
-
- assertThat(lotsOfSpans().filter(s -> sampler.isSampled(s.traceId(), false)))
- .hasSize(LOTS_OF_SPANS.length);
- }
-
- @Test
- public void rateCantBeNegative() {
- thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("rate should be between 0 and 1: was -1.0");
-
- CollectorSampler.create(-1.0f);
- }
-
- @Test
- public void rateCantBeOverOne() {
- thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("rate should be between 0 and 1: was 1.1");
-
- CollectorSampler.create(1.1f);
- }
-
- static Stream lotsOfSpans() {
- return Stream.of(LOTS_OF_SPANS).parallel();
- }
-}
diff --git a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java b/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java
deleted file mode 100644
index 22682db5c01..00000000000
--- a/zipkin-collector/core/src/test/java/zipkin2/collector/CollectorTest.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector;
-
-import java.util.concurrent.RejectedExecutionException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.LoggerFactory;
-import uk.org.lidalia.slf4jext.Level;
-import uk.org.lidalia.slf4jtest.TestLogger;
-import uk.org.lidalia.slf4jtest.TestLoggerFactory;
-import zipkin2.Callback;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.storage.InMemoryStorage;
-import zipkin2.storage.StorageComponent;
-
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-import static zipkin2.TestObjects.CLIENT_SPAN;
-import static zipkin2.TestObjects.TRACE;
-import static zipkin2.TestObjects.UTF_8;
-
-public class CollectorTest {
- InMemoryStorage storage = InMemoryStorage.newBuilder().build();
- Callback callback = mock(Callback.class);
- CollectorMetrics metrics = mock(CollectorMetrics.class);
- Collector collector;
- private TestLogger testLogger = TestLoggerFactory.getTestLogger("");
-
- @Before
- public void setup() {
- testLogger.clearAll();
- collector = spy(
- new Collector.Builder(testLogger).metrics(metrics).storage(storage).build());
- when(collector.idString(CLIENT_SPAN)).thenReturn("1"); // to make expectations easier to read
- }
-
- @After
- public void after() {
- verifyNoMoreInteractions(metrics, callback);
- }
-
- @Test
- public void unsampledSpansArentStored() {
- collector = new Collector.Builder(LoggerFactory.getLogger(""))
- .sampler(CollectorSampler.create(0.0f))
- .metrics(metrics)
- .storage(storage)
- .build();
-
- collector.accept(TRACE, callback);
-
- verify(callback).onSuccess(null);
- assertThat(testLogger.getLoggingEvents()).isEmpty();
- verify(metrics).incrementSpans(4);
- verify(metrics).incrementSpansDropped(4);
- assertThat(storage.getTraces()).isEmpty();
- }
-
- @Test
- public void errorDetectingFormat() {
- collector.acceptSpans(new byte[] {'f', 'o', 'o'}, callback);
-
- verify(callback).onError(any(RuntimeException.class));
- verify(metrics).incrementMessagesDropped();
- }
-
- @Test
- public void acceptSpans_jsonV2() {
- byte[] bytes = SpanBytesEncoder.JSON_V2.encodeList(TRACE);
- collector.acceptSpans(bytes, callback);
-
- verify(collector).acceptSpans(bytes, SpanBytesDecoder.JSON_V2, callback);
-
- verify(callback).onSuccess(null);
- assertThat(testLogger.getLoggingEvents()).isEmpty();
- verify(metrics).incrementSpans(4);
- assertThat(storage.getTraces()).containsOnly(TRACE);
- }
-
- @Test
- public void acceptSpans_decodingError() {
- byte[] bytes = "[\"='".getBytes(UTF_8); // screwed up json
- collector.acceptSpans(bytes, SpanBytesDecoder.JSON_V2, callback);
-
- verify(callback).onError(any(IllegalArgumentException.class));
- assertDebugLogIs("Malformed reading List from json");
- verify(metrics).incrementMessagesDropped();
- }
-
- @Test
- public void accept_storageError() {
- StorageComponent storage = mock(StorageComponent.class);
- RuntimeException error = new RuntimeException("storage disabled");
- when(storage.spanConsumer()).thenThrow(error);
- collector = new Collector.Builder(LoggerFactory.getLogger(""))
- .metrics(metrics)
- .storage(storage)
- .build();
-
- collector.accept(TRACE, callback);
-
- verify(callback).onSuccess(null); // error is async
- assertDebugLogIs("Cannot store spans [1, 2, 2, ...] due to RuntimeException(storage disabled)");
- verify(metrics).incrementSpans(4);
- verify(metrics).incrementSpansDropped(4);
- }
-
- @Test
- public void acceptSpans_emptyMessageOk() {
- byte[] bytes = new byte[] {'[', ']'};
- collector.acceptSpans(bytes, callback);
-
- verify(collector).acceptSpans(bytes, SpanBytesDecoder.JSON_V1, callback);
-
- verify(callback).onSuccess(null);
- assertThat(testLogger.getLoggingEvents()).isEmpty();
- assertThat(storage.getTraces()).isEmpty();
- }
-
- @Test
- public void storeSpansCallback_toStringIncludesSpanIds() {
- Span span2 = CLIENT_SPAN.toBuilder().id("3").build();
- when(collector.idString(span2)).thenReturn("3");
-
- assertThat(collector.new StoreSpans(asList(CLIENT_SPAN, span2)))
- .hasToString("StoreSpans([1, 3])");
- }
-
- @Test
- public void storeSpansCallback_toStringIncludesSpanIds_noMoreThan3() {
- assertThat(unprefixIdString(collector.new StoreSpans(TRACE).toString()))
- .hasToString("StoreSpans([1, 1, 2, ...])");
- }
-
- @Test
- public void storeSpansCallback_onErrorWithNullMessage() {
- RuntimeException error = new RuntimeException();
-
- Callback callback = collector.new StoreSpans(TRACE);
- callback.onError(error);
-
- assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to RuntimeException()");
- verify(metrics).incrementSpansDropped(4);
- }
-
- @Test
- public void storeSpansCallback_onErrorWithMessage() {
- IllegalArgumentException error = new IllegalArgumentException("no beer");
- Callback callback = collector.new StoreSpans(TRACE);
- callback.onError(error);
-
- assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to IllegalArgumentException(no beer)");
- verify(metrics).incrementSpansDropped(4);
- }
-
- @Test
- public void errorAcceptingSpans_onErrorRejectedExecution() {
- RuntimeException error = new RejectedExecutionException("slow down");
- collector.handleStorageError(TRACE, error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs(
- "Cannot store spans [1, 1, 2, ...] due to RejectedExecutionException(slow down)");
- verify(metrics).incrementSpansDropped(4);
- }
-
- public void handleStorageError_onErrorWithNullMessage() {
- RuntimeException error = new RuntimeException();
- collector.handleStorageError(TRACE, error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to RuntimeException()");
- verify(metrics).incrementSpansDropped(4);
- }
-
- @Test
- public void handleStorageError_onErrorWithMessage() {
- RuntimeException error = new IllegalArgumentException("no beer");
- collector.handleStorageError(TRACE, error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Cannot store spans [1, 1, 2, ...] due to IllegalArgumentException(no beer)");
- verify(metrics).incrementSpansDropped(4);
- }
-
- @Test
- public void handleDecodeError_onErrorWithNullMessage() {
- RuntimeException error = new RuntimeException();
- collector.handleDecodeError(error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Cannot decode spans due to RuntimeException()");
- verify(metrics).incrementMessagesDropped();
- }
-
- @Test
- public void handleDecodeError_onErrorWithMessage() {
- IllegalArgumentException error = new IllegalArgumentException("no beer");
- collector.handleDecodeError(error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Cannot decode spans due to IllegalArgumentException(no beer)");
- verify(metrics).incrementMessagesDropped();
- }
-
- @Test
- public void handleDecodeError_doesntWrapMessageOnMalformedException() {
- IllegalArgumentException error = new IllegalArgumentException("Malformed reading spans");
- collector.handleDecodeError(error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Malformed reading spans");
- verify(metrics).incrementMessagesDropped();
- }
-
- @Test
- public void handleDecodeError_doesntWrapMessageOnTruncatedException() {
- IllegalArgumentException error = new IllegalArgumentException("Truncated reading spans");
- collector.handleDecodeError(error, callback);
-
- verify(callback).onError(error);
- assertDebugLogIs("Truncated reading spans");
- verify(metrics).incrementMessagesDropped();
- }
-
- private String unprefixIdString(String msg) {
- return msg.replaceAll("7180c278b62e8f6a216a2aea45d08fc9/000000000000000", "");
- }
-
- private void assertDebugLogIs(String message) {
- assertThat(testLogger.getLoggingEvents())
- .hasSize(1)
- .filteredOn(event -> event.getLevel().equals(Level.DEBUG))
- .extracting(event -> unprefixIdString(event.getMessage()))
- .containsOnly(message);
- }
-}
diff --git a/zipkin-collector/kafka/README.md b/zipkin-collector/kafka/README.md
deleted file mode 100644
index c68ae2c1b36..00000000000
--- a/zipkin-collector/kafka/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# collector-kafka
-
-## KafkaCollector
-This collector is implemented as a Kafka consumer supporting Kafka brokers running
-version 0.10.0.0 or later. It polls a Kafka [topic](#kafka-configuration) for messages that contain
-a list of spans in json or TBinaryProtocol big-endian encoding. These
-spans are pushed to a span consumer.
-
-For information about running this collector as a module in Zipkin server, see
-the [Zipkin Server README](../../zipkin-server/README.md#kafka-collector).
-
-When using this collector as a library outside of Zipkin server,
-[zipkin2.collector.kafka.KafkaCollector.Builder](src/main/java/zipkin2/collector/kafka/KafkaCollector.java)
-includes defaults that will operate against a Kafka topic name `zipkin`.
-
-## Encoding spans into Kafka messages
-The message's binary data includes a list of spans. Supported encodings
-are the same as the http [POST /spans](https://zipkin.io/zipkin-api/#/paths/%252Fspans) body.
-
-### Json
-The message's binary data is a list of spans in json. The first character must be '[' (decimal 91).
-
-`Codec.JSON.writeSpans(spans)` performs the correct json encoding.
-
-Here's an example, sending a list of a single span to the zipkin topic:
-
-```bash
-$ kafka-console-producer.sh --broker-list $ADVERTISED_HOST:9092 --topic zipkin
-[{"traceId":"1","name":"bang","id":"2","timestamp":1470150004071068,"duration":1,"localEndpoint":{"serviceName":"flintstones"},"tags":{"lc":"bamm-bamm"}}]
-```
-
-### Thrift
-The message's binary data includes a list header followed by N spans serialized in TBinaryProtocol
-
-`Codec.THRIFT.writeSpans(spans)` encodes spans in the following fashion:
-```
-write_byte(12) // type of the list elements: 12 == struct
-write_i32(count) // count of spans that will follow
-for (int i = 0; i < count; i++) {
- writeTBinaryProtocol(spans(i))
-}
-```
-
-### Legacy encoding
-Older versions of zipkin accepted a single span per message, as opposed
-to a list per message. This practice is deprecated, but still supported.
-
-## Kafka configuration
-
-Below are a few guidelines for the Kafka infrastructure used by this collector:
-* The collector does not explicitly create the `zipkin` topic itself. If your cluster has auto topic creation enabled then it will be created by Kafka automatically using the broker configured defaults. We recommend therefor creating the topic manually before starting the collector, using configuration parameters adapted for your Zipkin setup.
-* The collector will not fail if the `zipkin` topic does not exist, it will instead just wait for the topic to become available.
-* A size based retention makes more sense than the default time based (1 week), to safeguard against large bursts of span data.
-* The collector starts 1 instance of `KafkaConsumer` by default. We do recommend creating the `zipkin` topic with 6 or more partitions however, as it allows you to easily scale out the collector later by increasing the [KAFKA_STREAMS](../../zipkin-server/README.md#kafka-collector) parameter.
-* As Zipkin reporter sends batches of spans which do not rely on any kind of ordering guarantee (key=null), you can increase the number of partitions without affecting ordering. It does not make sense however to have more `KafkaConsumer` instances than partitions as the instances will just be idle and not consume anything.
-* Monitoring the consumer lag of the collector as well as the size of the topic will help you to decide if scaling up or down is needed.
-* Tuning this collector should happen in coordination with the storage backend. Parameters like `max.poll.records`, `fetch.max.bytes` can prevent the collector from overloading the storage backend, or if it's sized properly they could instead be used to increase ingestion rate.
-* A large and consistent consumer lag can indicate that the storage has difficulties with the ingestion rate and could be scaled up.
-
-## Logging
-Zipkin by default suppresses all logging output from Kafka client operations as they can get quite verbose. Start Zipkin with `--logging.level.org.apache.kafka=INFO` or similar to override this during troubleshooting for example.
diff --git a/zipkin-collector/kafka/pom.xml b/zipkin-collector/kafka/pom.xml
deleted file mode 100644
index d6c2a113995..00000000000
--- a/zipkin-collector/kafka/pom.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- 2.24.4-SNAPSHOT
-
-
- zipkin-collector-kafka
- Collector: Kafka 0.10+
-
-
- ${project.basedir}/../..
- 3.4.0
-
-
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- org.apache.kafka
- kafka-clients
- ${kafka.version}
-
-
-
-
- com.fasterxml.jackson.core
- jackson-databind
- ${jackson.version}
- test
-
-
-
diff --git a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java b/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java
deleted file mode 100644
index df4585b88cf..00000000000
--- a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollector.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.kafka;
-
-import java.time.Duration;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.kafka.clients.admin.AdminClient;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.common.KafkaException;
-import org.apache.kafka.common.KafkaFuture;
-import org.apache.kafka.common.config.ConfigException;
-import org.apache.kafka.common.errors.InterruptException;
-import org.apache.kafka.common.serialization.ByteArrayDeserializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import zipkin2.Call;
-import zipkin2.CheckResult;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorComponent;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.CollectorSampler;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG;
-import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG;
-import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG;
-import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG;
-import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG;
-
-/**
- * This collector polls a Kafka topic for messages that contain TBinaryProtocol big-endian encoded
- * lists of spans. These spans are pushed to a {@link SpanConsumer#accept span consumer}.
- *
- *
This collector uses a Kafka 0.10+ consumer.
- */
-public final class KafkaCollector extends CollectorComponent {
- private static final Logger LOG = LoggerFactory.getLogger(KafkaCollector.class);
-
- public static Builder builder() {
- return new Builder();
- }
-
- /** Configuration including defaults needed to consume spans from a Kafka topic. */
- public static final class Builder extends CollectorComponent.Builder {
- final Properties properties = new Properties();
- final Collector.Builder delegate = Collector.newBuilder(KafkaCollector.class);
- CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS;
- String topic = "zipkin";
- int streams = 1;
-
- @Override
- public Builder storage(StorageComponent storage) {
- delegate.storage(storage);
- return this;
- }
-
- @Override
- public Builder sampler(CollectorSampler sampler) {
- delegate.sampler(sampler);
- return this;
- }
-
- @Override
- public Builder metrics(CollectorMetrics metrics) {
- if (metrics == null) throw new NullPointerException("metrics == null");
- this.metrics = metrics.forTransport("kafka");
- delegate.metrics(this.metrics);
- return this;
- }
-
- /**
- * Topic zipkin spans will be consumed from. Defaults to "zipkin". Multiple topics may be
- * specified if comma delimited.
- */
- public Builder topic(String topic) {
- if (topic == null) throw new NullPointerException("topic == null");
- this.topic = topic;
- return this;
- }
-
- /** The bootstrapServers connect string, ex. 127.0.0.1:9092. No default. */
- public Builder bootstrapServers(String bootstrapServers) {
- if (bootstrapServers == null) throw new NullPointerException("bootstrapServers == null");
- properties.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
- return this;
- }
-
- /** The consumer group this process is consuming on behalf of. Defaults to "zipkin" */
- public Builder groupId(String groupId) {
- if (groupId == null) throw new NullPointerException("groupId == null");
- properties.put(GROUP_ID_CONFIG, groupId);
- return this;
- }
-
- /** Count of threads consuming the topic. Defaults to 1 */
- public Builder streams(int streams) {
- this.streams = streams;
- return this;
- }
-
- /**
- * By default, a consumer will be built from properties derived from builder defaults, as well
- * as "auto.offset.reset" -> "earliest". Any properties set here will override the consumer
- * config.
- *
- *
For example: Only consume spans since you connected by setting the below.
- *
- *
- *
- * @see org.apache.kafka.clients.consumer.ConsumerConfig
- */
- public final Builder overrides(Map overrides) {
- if (overrides == null) throw new NullPointerException("overrides == null");
- properties.putAll(overrides);
- return this;
- }
-
- @Override
- public KafkaCollector build() {
- return new KafkaCollector(this);
- }
-
- Builder() {
- // Settings below correspond to "New Consumer Configs"
- // https://kafka.apache.org/documentation/#newconsumerconfigs
- properties.put(GROUP_ID_CONFIG, "zipkin");
- properties.put(AUTO_OFFSET_RESET_CONFIG, "earliest");
- properties.put(KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
- properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
- }
- }
-
- final LazyKafkaWorkers kafkaWorkers;
- final Properties properties;
- volatile AdminClient adminClient;
-
- KafkaCollector(Builder builder) {
- kafkaWorkers = new LazyKafkaWorkers(builder);
- properties = builder.properties;
- }
-
- @Override
- public KafkaCollector start() {
- kafkaWorkers.start();
- return this;
- }
-
- @Override
- public CheckResult check() {
- try {
- CheckResult failure = kafkaWorkers.failure.get(); // check the kafka workers didn't quit
- if (failure != null) return failure;
- KafkaFuture maybeClusterId = getAdminClient().describeCluster().clusterId();
- maybeClusterId.get(1, TimeUnit.SECONDS);
- return CheckResult.OK;
- } catch (Throwable e) {
- Call.propagateIfFatal(e);
- return CheckResult.failed(e);
- }
- }
-
- AdminClient getAdminClient() {
- if (adminClient == null) {
- synchronized (this) {
- if (adminClient == null) {
- adminClient = AdminClient.create(properties);
- }
- }
- }
- return adminClient;
- }
-
- @Override
- public void close() {
- kafkaWorkers.close();
- if (adminClient != null) adminClient.close(Duration.ofSeconds(1));
- }
-
- @Override public final String toString() {
- return "KafkaCollector{"
- + "bootstrapServers=" + properties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)
- + ", topic=" + kafkaWorkers.builder.topic
- + "}";
- }
-
- static final class LazyKafkaWorkers {
- final int streams;
- final Builder builder;
- final AtomicReference failure = new AtomicReference<>();
- final CopyOnWriteArrayList workers = new CopyOnWriteArrayList<>();
- volatile ExecutorService pool;
-
- LazyKafkaWorkers(Builder builder) {
- this.streams = builder.streams;
- this.builder = builder;
- }
-
- void start() {
- if (pool == null) {
- synchronized (this) {
- if (pool == null) {
- pool = compute();
- }
- }
- }
- }
-
- void close() {
- ExecutorService maybePool = pool;
- if (maybePool == null) return;
- for (KafkaCollectorWorker worker : workers) {
- worker.stop();
- }
- maybePool.shutdown();
- try {
- if (!maybePool.awaitTermination(2, TimeUnit.SECONDS)) {
- // Timeout exceeded: force shutdown
- maybePool.shutdownNow();
- }
- } catch (InterruptedException e) {
- // at least we tried
- }
- }
-
- ExecutorService compute() {
- ExecutorService pool =
- streams == 1 ? Executors.newSingleThreadExecutor() : Executors.newFixedThreadPool(streams);
-
- for (int i = 0; i < streams; i++) {
- // TODO: bad idea to lazy reference properties from a mutable builder
- // copy them here and then pass this to the KafkaCollectorWorker constructor instead
- KafkaCollectorWorker worker = new KafkaCollectorWorker(builder);
- workers.add(worker);
- pool.execute(guardFailures(worker));
- }
-
- return pool;
- }
-
- Runnable guardFailures(final Runnable delegate) {
- return () -> {
- try {
- delegate.run();
- } catch (InterruptException e) {
- // Interrupts are normal on shutdown, intentionally swallow
- } catch (KafkaException e) {
- if (e.getCause() instanceof ConfigException) e = (KafkaException) e.getCause();
- LOG.error("Kafka worker exited with exception", e);
- failure.set(CheckResult.failed(e));
- } catch (RuntimeException e) {
- LOG.error("Kafka worker exited with exception", e);
- failure.set(CheckResult.failed(e));
- } catch (Error e) {
- LOG.error("Kafka worker exited with error", e);
- failure.set(CheckResult.failed(new RuntimeException(e)));
- }
- };
- }
- }
-}
diff --git a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java b/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java
deleted file mode 100644
index 98e3178a500..00000000000
--- a/zipkin-collector/kafka/src/main/java/zipkin2/collector/kafka/KafkaCollectorWorker.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.kafka;
-
-import java.time.Duration;
-import java.time.temporal.ChronoUnit;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.TopicPartition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import zipkin2.Callback;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-
-/** Consumes spans from Kafka messages, ignoring malformed input */
-final class KafkaCollectorWorker implements Runnable {
- static final Logger LOG = LoggerFactory.getLogger(KafkaCollectorWorker.class);
- static final Callback NOOP =
- new Callback() {
- @Override
- public void onSuccess(Void value) {}
-
- @Override
- public void onError(Throwable t) {}
- };
-
- final Properties properties;
- final List topics;
- final Collector collector;
- final CollectorMetrics metrics;
- // added for integration tests only, see ITKafkaCollector
- final AtomicReference> assignedPartitions =
- new AtomicReference<>(Collections.emptyList());
- final AtomicBoolean running = new AtomicBoolean(true);
-
- KafkaCollectorWorker(KafkaCollector.Builder builder) {
- properties = builder.properties;
- topics = Arrays.asList(builder.topic.split(","));
- collector = builder.delegate.build();
- metrics = builder.metrics;
- }
-
- @Override
- public void run() {
- try (KafkaConsumer kafkaConsumer = new KafkaConsumer<>(properties)) {
- kafkaConsumer.subscribe(
- topics,
- // added for integration tests only, see ITKafkaCollector
- new ConsumerRebalanceListener() {
- @Override
- public void onPartitionsRevoked(Collection partitions) {
- // technically we should remove only the revoked partitions but for test purposes it
- // does not matter
- assignedPartitions.set(Collections.emptyList());
- }
-
- @Override
- public void onPartitionsAssigned(Collection partitions) {
- assignedPartitions.set(Collections.unmodifiableList(new ArrayList<>(partitions)));
- }
- });
- LOG.debug("Kafka consumer starting polling loop.");
- while (running.get()) {
- final ConsumerRecords consumerRecords = kafkaConsumer.poll(Duration.of(1000, ChronoUnit.MILLIS));
- LOG.debug("Kafka polling returned batch of {} messages.", consumerRecords.count());
- for (ConsumerRecord record : consumerRecords) {
- final byte[] bytes = record.value();
- metrics.incrementMessages();
- metrics.incrementBytes(bytes.length);
-
- if (bytes.length == 0) continue; // lenient on empty messages
-
- if (bytes.length < 2) { // need two bytes to check if protobuf
- metrics.incrementMessagesDropped();
- } else {
- // If we received legacy single-span encoding, decode it into a singleton list
- if (!protobuf3(bytes) && bytes[0] <= 16 && bytes[0] != 12 /* thrift, but not list */) {
- Span span;
- try {
- span = SpanBytesDecoder.THRIFT.decodeOne(bytes);
- } catch (RuntimeException e) {
- metrics.incrementMessagesDropped();
- continue;
- }
- collector.accept(Collections.singletonList(span), NOOP);
- } else {
- collector.acceptSpans(bytes, NOOP);
- }
- }
- }
- }
- } catch (RuntimeException | Error e) {
- LOG.warn("Unexpected error in polling loop spans", e);
- throw e;
- } finally {
- LOG.debug("Kafka consumer polling loop stopped. Kafka consumer closed.");
- }
- }
-
- /**
- * Stop the polling loop
- */
- public void stop() {
- running.set(false);
- }
-
- /* span key or trace ID key */
- static boolean protobuf3(byte[] bytes) {
- return bytes[0] == 10 && bytes[1] != 0; // varint follows and won't be zero
- }
-}
diff --git a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java b/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java
deleted file mode 100644
index 2511e3753ae..00000000000
--- a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/ITKafkaCollector.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.kafka;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.KafkaException;
-import org.apache.kafka.common.serialization.ByteArraySerializer;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.extension.RegisterExtension;
-import zipkin2.Call;
-import zipkin2.Callback;
-import zipkin2.Component;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.storage.ForwardingStorageComponent;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static zipkin2.TestObjects.CLIENT_SPAN;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-import static zipkin2.TestObjects.UTF_8;
-import static zipkin2.codec.SpanBytesEncoder.JSON_V2;
-import static zipkin2.codec.SpanBytesEncoder.THRIFT;
-
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@Timeout(60)
-class ITKafkaCollector {
- @RegisterExtension KafkaExtension kafka = new KafkaExtension();
-
- List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]);
-
- InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics();
- InMemoryCollectorMetrics kafkaMetrics = metrics.forTransport("kafka");
-
- CopyOnWriteArraySet threadsProvidingSpans = new CopyOnWriteArraySet<>();
- LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>();
- SpanConsumer consumer = (spans) -> {
- threadsProvidingSpans.add(Thread.currentThread());
- receivedSpans.add(spans);
- return Call.create(null);
- };
- KafkaProducer producer;
-
- @BeforeEach void setup() {
- metrics.clear();
- threadsProvidingSpans.clear();
- Properties config = new Properties();
- config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.bootstrapServer());
- producer = new KafkaProducer<>(config, new ByteArraySerializer(), new ByteArraySerializer());
- }
-
- @AfterEach void tearDown() {
- if (producer != null) producer.close();
- }
-
- @Test void checkPasses() {
- try (KafkaCollector collector = builder("check_passes").build()) {
- assertThat(collector.check().ok()).isTrue();
- }
- }
-
- /**
- * Don't raise exception (crash process), rather fail status check! This allows the health check
- * to report the cause.
- */
- @Test void check_failsOnInvalidBootstrapServers() throws Exception {
-
- KafkaCollector.Builder builder =
- builder("fail_invalid_bootstrap_servers").bootstrapServers("1.1.1.1");
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
-
- Thread.sleep(1000L); // wait for crash
-
- assertThat(collector.check().error())
- .isInstanceOf(KafkaException.class)
- .hasMessage("Invalid url in bootstrap.servers: 1.1.1.1");
- }
- }
-
- /**
- * If the Kafka broker(s) specified in the connection string are not available, the Kafka consumer
- * library will attempt to reconnect indefinitely. The Kafka consumer will not throw an exception
- * and does not expose the status of its connection to the Kafka broker(s) in its API.
- *
- * An AdminClient API instance has been added to the connector to validate that connection with
- * Kafka is available in every health check. This AdminClient reuses Consumer's properties to
- * Connect to the cluster, and request a Cluster description to validate communication with
- * Kafka.
- */
- @Test void reconnectsIndefinitelyAndReportsUnhealthyWhenKafkaUnavailable() throws Exception {
- KafkaCollector.Builder builder =
- builder("fail_invalid_bootstrap_servers").bootstrapServers("localhost:" + 9092);
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- Thread.sleep(TimeUnit.SECONDS.toMillis(1));
- assertThat(collector.check().error()).isInstanceOf(TimeoutException.class);
- }
- }
-
- /** Ensures legacy encoding works: a single TBinaryProtocol encoded span */
- @Test void messageWithSingleThriftSpan() throws Exception {
- KafkaCollector.Builder builder = builder("single_span");
-
- byte[] bytes = THRIFT.encode(CLIENT_SPAN);
- produceSpans(bytes, builder.topic);
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsExactly(CLIENT_SPAN);
- }
-
- assertThat(kafkaMetrics.messages()).isEqualTo(1);
- assertThat(kafkaMetrics.messagesDropped()).isZero();
- assertThat(kafkaMetrics.bytes()).isEqualTo(bytes.length);
- assertThat(kafkaMetrics.spans()).isEqualTo(1);
- assertThat(kafkaMetrics.spansDropped()).isZero();
- }
-
- /** Ensures list encoding works: a TBinaryProtocol encoded list of spans */
- @Test void messageWithMultipleSpans_thrift() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_thrift"), THRIFT);
- }
-
- /** Ensures list encoding works: a json encoded list of spans */
- @Test void messageWithMultipleSpans_json() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_json"), SpanBytesEncoder.JSON_V1);
- }
-
- /** Ensures list encoding works: a version 2 json list of spans */
- @Test void messageWithMultipleSpans_json2() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_json2"), SpanBytesEncoder.JSON_V2);
- }
-
- /** Ensures list encoding works: proto3 ListOfSpans */
- @Test void messageWithMultipleSpans_proto3() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_proto3"), SpanBytesEncoder.PROTO3);
- }
-
- void messageWithMultipleSpans(KafkaCollector.Builder builder, SpanBytesEncoder encoder)
- throws Exception {
- byte[] message = encoder.encodeList(spans);
-
- produceSpans(message, builder.topic);
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsAll(spans);
- }
-
- assertThat(kafkaMetrics.messages()).isEqualTo(1);
- assertThat(kafkaMetrics.messagesDropped()).isZero();
- assertThat(kafkaMetrics.bytes()).isEqualTo(message.length);
- assertThat(kafkaMetrics.spans()).isEqualTo(spans.size());
- assertThat(kafkaMetrics.spansDropped()).isZero();
- }
-
- /** Ensures malformed spans don't hang the collector */
- @Test void skipsMalformedData() throws Exception {
- KafkaCollector.Builder builder = builder("decoder_exception");
-
- byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json
- byte[] malformed2 = "malformed".getBytes(UTF_8);
- produceSpans(THRIFT.encodeList(spans), builder.topic);
- produceSpans(new byte[0], builder.topic);
- produceSpans(malformed1, builder.topic);
- produceSpans(malformed2, builder.topic);
- produceSpans(THRIFT.encodeList(spans), builder.topic);
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- // the only way we could read this, is if the malformed spans were skipped.
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- }
-
- assertThat(kafkaMetrics.messages()).isEqualTo(5);
- assertThat(kafkaMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty
- assertThat(kafkaMetrics.bytes())
- .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length);
- assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 2);
- assertThat(kafkaMetrics.spansDropped()).isZero();
- }
-
- /** Guards against errors that leak from storage, such as InvalidQueryException */
- @Test void skipsOnSpanStorageException() throws Exception {
- AtomicInteger counter = new AtomicInteger();
- consumer = (input) -> new Call.Base() {
- @Override protected Void doExecute() {
- throw new AssertionError();
- }
-
- @Override protected void doEnqueue(Callback callback) {
- if (counter.getAndIncrement() == 1) {
- callback.onError(new RuntimeException("storage fell over"));
- } else {
- receivedSpans.add(spans);
- callback.onSuccess(null);
- }
- }
-
- @Override public Call clone() {
- throw new AssertionError();
- }
- };
- final StorageComponent storage = buildStorage(consumer);
- KafkaCollector.Builder builder = builder("storage_exception").storage(storage);
-
- produceSpans(THRIFT.encodeList(spans), builder.topic);
- produceSpans(THRIFT.encodeList(spans), builder.topic); // tossed on error
- produceSpans(THRIFT.encodeList(spans), builder.topic);
-
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- // the only way we could read this, is if the malformed span was skipped.
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- }
-
- assertThat(kafkaMetrics.messages()).isEqualTo(3);
- assertThat(kafkaMetrics.messagesDropped()).isZero(); // storage failure isn't a message failure
- assertThat(kafkaMetrics.bytes()).isEqualTo(THRIFT.encodeList(spans).length * 3);
- assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 3);
- assertThat(kafkaMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped
- }
-
- @Test void messagesDistributedAcrossMultipleThreadsSuccessfully() throws Exception {
- KafkaCollector.Builder builder = builder("multi_thread", 2);
-
- kafka.prepareTopics(builder.topic, 2);
- warmUpTopic(builder.topic);
-
- final byte[] traceBytes = JSON_V2.encodeList(spans);
- try (KafkaCollector collector = builder.build()) {
- collector.start();
- waitForPartitionAssignments(collector);
- produceSpans(traceBytes, builder.topic, 0);
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- produceSpans(traceBytes, builder.topic, 1);
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- }
-
- assertThat(threadsProvidingSpans.size()).isEqualTo(2);
-
- assertThat(kafkaMetrics.messages()).isEqualTo(3); // 2 + empty body for warmup
- assertThat(kafkaMetrics.messagesDropped()).isZero();
- assertThat(kafkaMetrics.bytes()).isEqualTo(traceBytes.length * 2);
- assertThat(kafkaMetrics.spans()).isEqualTo(spans.size() * 2);
- assertThat(kafkaMetrics.spansDropped()).isZero();
- }
-
- @Test void multipleTopicsCommaDelimited() {
- try (KafkaCollector collector = builder("topic1,topic2").build()) {
- collector.start();
-
- assertThat(collector.kafkaWorkers.workers.get(0).topics).containsExactly("topic1", "topic2");
- }
- }
-
- /**
- * The {@code toString()} of {@link Component} implementations appear in health check endpoints.
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken
- * to ensure {@code toString()} output is a reasonable length and does not contain sensitive
- * information.
- */
- @Test void toStringContainsOnlySummaryInformation() {
- try (KafkaCollector collector = builder("muah").build()) {
- collector.start();
-
- assertThat(collector).hasToString(
- String.format("KafkaCollector{bootstrapServers=%s, topic=%s}", kafka.bootstrapServer(),
- "muah")
- );
- }
- }
-
- /**
- * Producing this empty message triggers auto-creation of the topic and gets things "warmed up" on
- * the broker before the consumers subscribe. Without this, the topic is auto-created when the
- * first consumer subscribes but there appears to be a race condition where the existence of the
- * topic is not known to the partition assignor when the consumer group goes through its initial
- * re-balance. As a result, no partitions are assigned, there are no further changes to group
- * membership to trigger another re-balance, and no messages are consumed. This initial message is
- * not necessary if the test broker is re-created for each test, but that increases execution time
- * for the suite by a factor of 10x (2-3s to ~25s on my local machine).
- */
- void warmUpTopic(String topic) {
- produceSpans(new byte[0], topic);
- }
-
- /**
- * Wait until all kafka consumers created by the collector have at least one partition assigned.
- */
- void waitForPartitionAssignments(KafkaCollector collector) throws Exception {
- long consumersWithAssignments = 0;
- while (consumersWithAssignments < collector.kafkaWorkers.streams) {
- Thread.sleep(10);
- consumersWithAssignments =
- collector
- .kafkaWorkers
- .workers
- .stream()
- .filter(w -> !w.assignedPartitions.get().isEmpty())
- .count();
- }
- }
-
- void produceSpans(byte[] spans, String topic) {
- produceSpans(spans, topic, 0);
- }
-
- void produceSpans(byte[] spans, String topic, Integer partition) {
- producer.send(new ProducerRecord<>(topic, partition, null, spans));
- producer.flush();
- }
-
- KafkaCollector.Builder builder(String topic) {
- return builder(topic, 1);
- }
-
- KafkaCollector.Builder builder(String topic, int streams) {
- return kafka.newCollectorBuilder(topic, streams)
- .metrics(metrics)
- .storage(buildStorage(consumer));
- }
-
- static StorageComponent buildStorage(final SpanConsumer spanConsumer) {
- return new ForwardingStorageComponent() {
- @Override protected StorageComponent delegate() {
- throw new AssertionError();
- }
-
- @Override public SpanConsumer spanConsumer() {
- return spanConsumer;
- }
- };
- }
-}
diff --git a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java b/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java
deleted file mode 100644
index 38459d495a2..00000000000
--- a/zipkin-collector/kafka/src/test/java/zipkin2/collector/kafka/KafkaExtension.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.kafka;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.admin.AdminClient;
-import org.apache.kafka.clients.admin.AdminClientConfig;
-import org.apache.kafka.clients.admin.NewTopic;
-import org.apache.kafka.common.errors.TopicExistsException;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.BeforeAllCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-import org.opentest4j.TestAbortedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.containers.InternetProtocol;
-import org.testcontainers.containers.output.Slf4jLogConsumer;
-import org.testcontainers.containers.wait.strategy.Wait;
-
-import static org.testcontainers.utility.DockerImageName.parse;
-
-class KafkaExtension implements BeforeAllCallback, AfterAllCallback {
- static final Logger LOGGER = LoggerFactory.getLogger(KafkaExtension.class);
- static final int KAFKA_PORT = 19092;
-
- final KafkaContainer container = new KafkaContainer();
-
- @Override public void beforeAll(ExtensionContext context) {
- if (context.getRequiredTestClass().getEnclosingClass() != null) {
- // Only run once in outermost scope.
- return;
- }
-
- container.start();
- LOGGER.info("Using bootstrapServer " + bootstrapServer());
- }
-
- void prepareTopics(String topics, int partitions) {
- Properties config = new Properties();
- config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer());
-
- List newTopics = new ArrayList<>();
- for (String topic : topics.split(",")) {
- if ("".equals(topic)) continue;
- newTopics.add(new NewTopic(topic, partitions, (short) 1));
- }
-
- try (AdminClient adminClient = AdminClient.create(config)) {
- adminClient.createTopics(newTopics).all().get();
- } catch (InterruptedException | ExecutionException e) {
- if (e.getCause() != null && e.getCause() instanceof TopicExistsException) return;
- throw new TestAbortedException(
- "Topics could not be created " + newTopics + ": " + e.getMessage(), e);
- }
- }
-
- String bootstrapServer() {
- return container.getHost() + ":" + container.getMappedPort(KAFKA_PORT);
- }
-
- KafkaCollector.Builder newCollectorBuilder(String topic, int streams) {
- prepareTopics(topic, streams);
- return KafkaCollector.builder().bootstrapServers(bootstrapServer())
- .topic(topic)
- .groupId(topic + "_group")
- .streams(streams);
- }
-
- @Override public void afterAll(ExtensionContext context) {
- if (context.getRequiredTestClass().getEnclosingClass() != null) {
- // Only run once in outermost scope.
- return;
- }
- container.stop();
- }
-
- // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537
- static final class KafkaContainer extends GenericContainer {
- KafkaContainer() {
- super(parse("ghcr.io/openzipkin/zipkin-kafka:2.23.2"));
- if ("true".equals(System.getProperty("docker.skip"))) {
- throw new TestAbortedException("${docker.skip} == true");
- }
- waitStrategy = Wait.forHealthcheck();
- // 19092 is for connections from the Docker host and needs to be used as a fixed port.
- // TODO: someone who knows Kafka well, make ^^ comment better!
- addFixedExposedPort(KAFKA_PORT, KAFKA_PORT, InternetProtocol.TCP);
- withLogConsumer(new Slf4jLogConsumer(LOGGER));
- }
- }
-}
diff --git a/zipkin-collector/kafka/src/test/resources/simplelogger.properties b/zipkin-collector/kafka/src/test/resources/simplelogger.properties
deleted file mode 100644
index 42747f1736c..00000000000
--- a/zipkin-collector/kafka/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,11 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
-
-org.slf4j.simpleLogger.log.com.github.charithe.kafka=info
-org.slf4j.simpleLogger.log.zipkin2.collector.kafka=debug
-# uncomment to include kafka consumer configuration in test logs
-#logger.org.apache.kafka.clients.level=info
diff --git a/zipkin-collector/pom.xml b/zipkin-collector/pom.xml
deleted file mode 100644
index a9f7857acf7..00000000000
--- a/zipkin-collector/pom.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin
- zipkin-parent
- 2.24.4-SNAPSHOT
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- Collector
- pom
-
-
- ${project.basedir}/..
- 1.8
- java18
-
-
-
- core
- activemq
- kafka
- rabbitmq
- scribe
-
-
-
-
- ${project.groupId}
- zipkin
- ${project.version}
-
-
-
-
- com.google.code.gson
- gson
- ${gson.version}
- test
-
-
- ${project.groupId}
- zipkin-tests
- ${project.version}
- test
-
-
- org.slf4j
- slf4j-simple
- ${slf4j.version}
- test
-
-
- org.testcontainers
- testcontainers
- ${testcontainers.version}
- test
-
-
-
diff --git a/zipkin-collector/rabbitmq/README.md b/zipkin-collector/rabbitmq/README.md
deleted file mode 100644
index e47f57d1389..00000000000
--- a/zipkin-collector/rabbitmq/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# collector-rabbitmq
-
-## RabbitMQCollector
-This collector consumes a RabbitMQ queue for messages that contain a list of spans.
-Its only dependencies besides Zipkin core are the `slf4j-api` and the [RabbitMQ Java Client](https://github.com/rabbitmq/rabbitmq-java-client).
-
-### Configuration
-
-The following configuration can be set for the RabbitMQ Collector.
-
-Property | Environment Variable | Description
---- | --- | ---
-`zipkin.collector.rabbitmq.concurrency` | `RABBIT_CONCURRENCY` | Number of concurrent consumers. Defaults to `1`
-`zipkin.collector.rabbitmq.connection-timeout` | `RABBIT_CONNECTION_TIMEOUT` | Milliseconds to wait establishing a connection. Defaults to `60000` (1 minute)
-`zipkin.collector.rabbitmq.queue` | `RABBIT_QUEUE` | Queue from which to collect span messages. Defaults to `zipkin`
-`zipkin.collector.rabbitmq.uri` | `RABBIT_URI` | [RabbitMQ URI spec](https://www.rabbitmq.com/uri-spec.html)-compliant URI, ex. `amqp://user:pass@host:10000/vhost`
-
-If the URI is set, the following properties will be ignored.
-
-Property | Environment Variable | Description
---- | --- | ---
-`zipkin.collector.rabbitmq.addresses` | `RABBIT_ADDRESSES` | Comma-separated list of RabbitMQ addresses, ex. `localhost:5672,localhost:5673`
-`zipkin.collector.rabbitmq.password` | `RABBIT_PASSWORD`| Password to use when connecting to RabbitMQ. Defaults to `guest`
-`zipkin.collector.rabbitmq.username` | `RABBIT_USER` | Username to use when connecting to RabbitMQ. Defaults to `guest`
-`zipkin.collector.rabbitmq.virtual-host` | `RABBIT_VIRTUAL_HOST` | RabbitMQ virtual host to use. Defaults to `/`
-`zipkin.collector.rabbitmq.use-ssl` | `RABBIT_USE_SSL` | Set to `true` to use SSL when connecting to RabbitMQ
-
-### Caveats
-
-The configured queue will be idempotently declared as a durable queue.
-
-This collector uses one connection to RabbitMQ, with the configured `concurrency` number of threads
-each using one channel to consume messages.
-
-Consumption is done with `autoAck` on, so messages that fail to process successfully are not retried.
-
-## Encoding spans into RabbitMQ messages
-The message's body should be the bytes of an encoded list of spans.
-
-### JSON
-A list of Spans in JSON. The first character must be '[' (decimal 91).
-
-`SpanBytesEncoder.JSON_V2.encodeList(spans)` performs the correct JSON encoding.
-
-## Local testing
-
-The following assumes you are running an instance of RabbitMQ locally on the default port (5672).
-You can download and install RabbitMQ following [instructions available here](https://www.rabbitmq.com/download.html).
-With the [RabbitMQ Management CLI](https://www.rabbitmq.com/management-cli.html) you can easily publish
-one-off spans to RabbitMQ to be collected by this collector.
-
-1. Start RabbitMQ server
-2. Start Zipkin server
-```bash
-$ RABBIT_ADDRESSES=localhost java -jar zipkin.jar
-```
-3. Save an array of spans to a file like `sample-spans.json`
-```json
-[{"traceId":"9032b04972e475c5","id":"9032b04972e475c5","kind":"SERVER","name":"get","timestamp":1505990621526000,"duration":612898,"localEndpoint":{"serviceName":"brave-webmvc-example","ipv4":"192.168.1.113"},"remoteEndpoint":{"serviceName":"","ipv4":"127.0.0.1","port":60149},"tags":{"error":"500 Internal Server Error","http.path":"/a"}}]
-```
-4. Publish them using the CLI
-```bash
-$ rabbitmqadmin publish exchange=amq.default routing_key=zipkin < sample-spans.json
-```
diff --git a/zipkin-collector/rabbitmq/pom.xml b/zipkin-collector/rabbitmq/pom.xml
deleted file mode 100644
index 1332cbf9434..00000000000
--- a/zipkin-collector/rabbitmq/pom.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- 2.24.4-SNAPSHOT
-
-
- zipkin-collector-rabbitmq
- Collector: RabbitMQ
- Zipkin span collector for RabbitMQ transport
-
-
- ${project.basedir}/../..
- 4.12.0
-
-
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- com.rabbitmq
- amqp-client
- ${amqp-client.version}
-
-
-
diff --git a/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java b/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java
deleted file mode 100644
index 973d3cdd99a..00000000000
--- a/zipkin-collector/rabbitmq/src/main/java/zipkin2/collector/rabbitmq/RabbitMQCollector.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.rabbitmq;
-
-import com.rabbitmq.client.AMQP.BasicProperties;
-import com.rabbitmq.client.Address;
-import com.rabbitmq.client.Channel;
-import com.rabbitmq.client.Connection;
-import com.rabbitmq.client.ConnectionFactory;
-import com.rabbitmq.client.DefaultConsumer;
-import com.rabbitmq.client.Envelope;
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import zipkin2.Call;
-import zipkin2.Callback;
-import zipkin2.CheckResult;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorComponent;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.CollectorSampler;
-import zipkin2.storage.StorageComponent;
-
-/** This collector consumes encoded binary messages from a RabbitMQ queue. */
-public final class RabbitMQCollector extends CollectorComponent {
- static final Callback NOOP = new Callback() {
- @Override public void onSuccess(Void value) {
- }
-
- @Override public void onError(Throwable t) {
- }
- };
-
- public static Builder builder() {
- return new Builder();
- }
-
- /** Configuration including defaults needed to consume spans from a RabbitMQ queue. */
- public static final class Builder extends CollectorComponent.Builder {
- Collector.Builder delegate = Collector.newBuilder(RabbitMQCollector.class);
- CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS;
- String queue = "zipkin";
- ConnectionFactory connectionFactory = new ConnectionFactory();
- Address[] addresses;
- int concurrency = 1;
-
- @Override
- public Builder storage(StorageComponent storage) {
- this.delegate.storage(storage);
- return this;
- }
-
- @Override
- public Builder sampler(CollectorSampler sampler) {
- this.delegate.sampler(sampler);
- return this;
- }
-
- @Override
- public Builder metrics(CollectorMetrics metrics) {
- if (metrics == null) throw new NullPointerException("metrics == null");
- this.metrics = metrics.forTransport("rabbitmq");
- this.delegate.metrics(this.metrics);
- return this;
- }
-
- public Builder addresses(List addresses) {
- this.addresses = convertAddresses(addresses);
- return this;
- }
-
- public Builder concurrency(int concurrency) {
- this.concurrency = concurrency;
- return this;
- }
-
- public Builder connectionFactory(ConnectionFactory connectionFactory) {
- if (connectionFactory == null) throw new NullPointerException("connectionFactory == null");
- this.connectionFactory = connectionFactory;
- return this;
- }
-
- /** Queue zipkin spans will be consumed from. Defaults to "zipkin-spans". */
- public Builder queue(String queue) {
- if (queue == null) throw new NullPointerException("queue == null");
- this.queue = queue;
- return this;
- }
-
- @Override
- public RabbitMQCollector build() {
- return new RabbitMQCollector(this);
- }
- }
-
- final String queue;
- final LazyInit connection;
-
- RabbitMQCollector(Builder builder) {
- this.queue = builder.queue;
- this.connection = new LazyInit(builder);
- }
-
- @Override
- public RabbitMQCollector start() {
- connection.get();
- return this;
- }
-
- @Override
- public CheckResult check() {
- try {
- start();
- CheckResult failure = connection.failure.get();
- if (failure != null) return failure;
- return CheckResult.OK;
- } catch (Throwable e) {
- Call.propagateIfFatal(e);
- return CheckResult.failed(e);
- }
- }
-
- @Override
- public void close() throws IOException {
- connection.close();
- }
-
- @Override public final String toString() {
- return "RabbitMQCollector{addresses="
- + Arrays.toString(connection.builder.addresses)
- + ", queue="
- + queue
- + "}";
- }
-
- /** Lazy creates a connection and a queue before starting consumers */
- static final class LazyInit {
- final Builder builder;
- final AtomicReference failure = new AtomicReference<>();
- volatile Connection connection;
-
- // TODO: bad idea to lazy reference properties from a mutable builder
- // copy them here and then pass this to the KafkaCollectorWorker ctor instead
- LazyInit(Builder builder) {
- this.builder = builder;
- }
-
- Connection get() {
- if (connection == null) {
- synchronized (this) {
- if (connection == null) {
- connection = compute();
- }
- }
- }
- return connection;
- }
-
- void close() throws IOException {
- Connection maybeConnection = connection;
- if (maybeConnection != null) maybeConnection.close();
- }
-
- Connection compute() {
- Connection connection;
- try {
- connection =
- (builder.addresses == null)
- ? builder.connectionFactory.newConnection()
- : builder.connectionFactory.newConnection(builder.addresses);
- declareQueueIfMissing(connection);
- } catch (IOException e) {
- throw new UncheckedIOException(
- "Unable to establish connection to RabbitMQ server: " + e.getMessage(), e);
- } catch (TimeoutException e) {
- throw new RuntimeException(
- "Timeout establishing connection to RabbitMQ server: " + e.getMessage(), e);
- }
- Collector collector = builder.delegate.build();
- CollectorMetrics metrics = builder.metrics;
-
- for (int i = 0; i < builder.concurrency; i++) {
- String consumerTag = "zipkin-rabbitmq." + i;
- try {
- // this sets up a channel for each consumer thread.
- // We don't track channels, as the connection will close its channels implicitly
- Channel channel = connection.createChannel();
- RabbitMQSpanConsumer consumer = new RabbitMQSpanConsumer(channel, collector, metrics);
- channel.basicConsume(builder.queue, true, consumerTag, consumer);
- } catch (IOException e) {
- throw new IllegalStateException("Failed to start RabbitMQ consumer " + consumerTag, e);
- }
- }
- return connection;
- }
-
- private void declareQueueIfMissing(Connection connection) throws IOException, TimeoutException {
- Channel channel = connection.createChannel();
- try {
- // check if queue already exists
- channel.queueDeclarePassive(builder.queue);
- channel.close();
- } catch (IOException maybeQueueDoesNotExist) {
- Throwable cause = maybeQueueDoesNotExist.getCause();
- if (cause != null && cause.getMessage().contains("NOT_FOUND")) {
- channel = connection.createChannel();
- channel.queueDeclare(builder.queue, true, false, false, null);
- channel.close();
- } else {
- throw maybeQueueDoesNotExist;
- }
- }
- }
- }
-
- /**
- * Consumes spans from messages on a RabbitMQ queue. Malformed messages will be discarded. Errors
- * in the storage component will similarly be ignored, with no retry of the message.
- */
- static class RabbitMQSpanConsumer extends DefaultConsumer {
- final Collector collector;
- final CollectorMetrics metrics;
-
- RabbitMQSpanConsumer(Channel channel, Collector collector, CollectorMetrics metrics) {
- super(channel);
- this.collector = collector;
- this.metrics = metrics;
- }
-
- @Override
- public void handleDelivery(String tag, Envelope envelope, BasicProperties props, byte[] body) {
- metrics.incrementMessages();
- metrics.incrementBytes(body.length);
-
- if (body.length == 0) return; // lenient on empty messages
-
- collector.acceptSpans(body, NOOP);
- }
- }
-
- static Address[] convertAddresses(List addresses) {
- Address[] addressArray = new Address[addresses.size()];
- for (int i = 0; i < addresses.size(); i++) {
- String[] splitAddress = addresses.get(i).split(":", 100);
- String host = splitAddress[0];
- int port = -1;
- try {
- if (splitAddress.length == 2) port = Integer.parseInt(splitAddress[1]);
- } catch (NumberFormatException ignore) {
- // EmptyCatch ignored
- }
- addressArray[i] = (port > 0) ? new Address(host, port) : new Address(host);
- }
- return addressArray;
- }
-}
diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java
deleted file mode 100644
index 5e084020e15..00000000000
--- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/ITRabbitMQCollector.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.rabbitmq;
-
-import com.rabbitmq.client.Channel;
-import com.rabbitmq.client.Connection;
-import com.rabbitmq.client.ConnectionFactory;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.extension.RegisterExtension;
-import zipkin2.Call;
-import zipkin2.Callback;
-import zipkin2.Component;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.storage.ForwardingStorageComponent;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-import static zipkin2.TestObjects.UTF_8;
-import static zipkin2.codec.SpanBytesEncoder.THRIFT;
-
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@Timeout(60)
-class ITRabbitMQCollector {
- @RegisterExtension RabbitMQExtension rabbit = new RabbitMQExtension();
-
- List spans = asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]);
-
- InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics();
- InMemoryCollectorMetrics rabbitmqMetrics = metrics.forTransport("rabbitmq");
-
- CopyOnWriteArraySet threadsProvidingSpans = new CopyOnWriteArraySet<>();
- LinkedBlockingQueue> receivedSpans = new LinkedBlockingQueue<>();
- SpanConsumer consumer = (spans) -> {
- threadsProvidingSpans.add(Thread.currentThread());
- receivedSpans.add(spans);
- return Call.create(null);
- };
- Connection connection;
-
- @BeforeEach void setup() throws Exception {
- metrics.clear();
- ConnectionFactory factory = new ConnectionFactory();
- factory.setHost(rabbit.host());
- factory.setPort(rabbit.port());
- connection = factory.newConnection();
- }
-
- @AfterEach void tearDown() throws Exception {
- if (connection != null) connection.close();
- }
-
- @Test void checkPasses() throws Exception {
- try (RabbitMQCollector collector = builder("check_passes").build()) {
- assertThat(collector.check().ok()).isTrue();
- }
- }
-
- /** Ensures list encoding works: a TBinaryProtocol encoded list of spans */
- @Test void messageWithMultipleSpans_thrift() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_thrift"), THRIFT);
- }
-
- /** Ensures list encoding works: a json encoded list of spans */
- @Test void messageWithMultipleSpans_json() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_json"), SpanBytesEncoder.JSON_V1);
- }
-
- /** Ensures list encoding works: a version 2 json list of spans */
- @Test void messageWithMultipleSpans_json2() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_json2"), SpanBytesEncoder.JSON_V2);
- }
-
- /** Ensures list encoding works: proto3 ListOfSpans */
- @Test void messageWithMultipleSpans_proto3() throws Exception {
- messageWithMultipleSpans(builder("multiple_spans_proto3"), SpanBytesEncoder.PROTO3);
- }
-
- void messageWithMultipleSpans(RabbitMQCollector.Builder builder, SpanBytesEncoder encoder)
- throws Exception {
- byte[] message = encoder.encodeList(spans);
-
- produceSpans(message, builder.queue);
-
- try (RabbitMQCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsAll(spans);
- }
-
- assertThat(rabbitmqMetrics.messages()).isEqualTo(1);
- assertThat(rabbitmqMetrics.messagesDropped()).isZero();
- assertThat(rabbitmqMetrics.bytes()).isEqualTo(message.length);
- assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size());
- assertThat(rabbitmqMetrics.spansDropped()).isZero();
- }
-
- /** Ensures malformed spans don't hang the collector */
- @Test void skipsMalformedData() throws Exception {
- RabbitMQCollector.Builder builder = builder("decoder_exception");
-
- byte[] malformed1 = "[\"='".getBytes(UTF_8); // screwed up json
- byte[] malformed2 = "malformed".getBytes(UTF_8);
- produceSpans(THRIFT.encodeList(spans), builder.queue);
- produceSpans(new byte[0], builder.queue);
- produceSpans(malformed1, builder.queue);
- produceSpans(malformed2, builder.queue);
- produceSpans(THRIFT.encodeList(spans), builder.queue);
-
- try (RabbitMQCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- // the only way we could read this, is if the malformed spans were skipped.
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- }
-
- assertThat(rabbitmqMetrics.messages()).isEqualTo(5);
- assertThat(rabbitmqMetrics.messagesDropped()).isEqualTo(2); // only malformed, not empty
- assertThat(rabbitmqMetrics.bytes())
- .isEqualTo(THRIFT.encodeList(spans).length * 2 + malformed1.length + malformed2.length);
- assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size() * 2);
- assertThat(rabbitmqMetrics.spansDropped()).isZero();
- }
-
- @Test void startsWhenConfiguredQueueDoesntExist() throws Exception {
- try (RabbitMQCollector collector = builder("ignored").queue("zipkin-test2").build()) {
- assertThat(collector.check().ok()).isTrue();
- }
- }
-
- /** Guards against errors that leak from storage, such as InvalidQueryException */
- @Test void skipsOnSpanStorageException() throws Exception {
- AtomicInteger counter = new AtomicInteger();
- consumer = (input) -> new Call.Base() {
- @Override protected Void doExecute() {
- throw new AssertionError();
- }
-
- @Override protected void doEnqueue(Callback callback) {
- if (counter.getAndIncrement() == 1) {
- callback.onError(new RuntimeException("storage fell over"));
- } else {
- receivedSpans.add(spans);
- callback.onSuccess(null);
- }
- }
-
- @Override public Call clone() {
- throw new AssertionError();
- }
- };
- final StorageComponent storage = buildStorage(consumer);
- RabbitMQCollector.Builder builder = builder("storage_exception").storage(storage);
-
- produceSpans(THRIFT.encodeList(spans), builder.queue);
- produceSpans(THRIFT.encodeList(spans), builder.queue); // tossed on error
- produceSpans(THRIFT.encodeList(spans), builder.queue);
-
- try (RabbitMQCollector collector = builder.build()) {
- collector.start();
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- // the only way we could read this, is if the malformed span was skipped.
- assertThat(receivedSpans.take()).containsExactlyElementsOf(spans);
- }
-
- assertThat(rabbitmqMetrics.messages()).isEqualTo(3);
- assertThat(rabbitmqMetrics.messagesDropped()).isZero(); // storage failure isn't a message failure
- assertThat(rabbitmqMetrics.bytes()).isEqualTo(THRIFT.encodeList(spans).length * 3);
- assertThat(rabbitmqMetrics.spans()).isEqualTo(spans.size() * 3);
- assertThat(rabbitmqMetrics.spansDropped()).isEqualTo(spans.size()); // only one dropped
- }
-
- /**
- * The {@code toString()} of {@link Component} implementations appear in health check endpoints.
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken
- * to ensure {@code toString()} output is a reasonable length and does not contain sensitive
- * information.
- */
- @Test void toStringContainsOnlySummaryInformation() throws Exception {
- try (RabbitMQCollector collector = builder("bugs bunny").build()) {
- collector.start();
-
- assertThat(collector).hasToString(
- String.format("RabbitMQCollector{addresses=[%s:%s], queue=%s}", rabbit.host(),
- rabbit.port(), "bugs bunny")
- );
- }
- }
-
- void produceSpans(byte[] spans, String queue) throws Exception {
- Channel channel = null;
- try {
- channel = connection.createChannel();
- channel.basicPublish("", queue, null, spans);
- } finally {
- if (channel != null) channel.close();
- }
- }
-
- RabbitMQCollector.Builder builder(String queue) {
- return rabbit.newCollectorBuilder(queue)
- .metrics(metrics)
- .storage(buildStorage(consumer));
- }
-
- static StorageComponent buildStorage(final SpanConsumer spanConsumer) {
- return new ForwardingStorageComponent() {
- @Override protected StorageComponent delegate() {
- throw new AssertionError();
- }
-
- @Override public SpanConsumer spanConsumer() {
- return spanConsumer;
- }
- };
- }
-}
diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java
deleted file mode 100644
index cb390f1d6bc..00000000000
--- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQCollectorTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.rabbitmq;
-
-import com.rabbitmq.client.ConnectionFactory;
-import java.io.UncheckedIOException;
-import org.junit.Before;
-import org.junit.Test;
-import zipkin2.CheckResult;
-import zipkin2.Component;
-
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-
-public class RabbitMQCollectorTest {
-
- RabbitMQCollector collector;
-
- @Before public void before() {
- ConnectionFactory connectionFactory = new ConnectionFactory();
- connectionFactory.setConnectionTimeout(100);
- // We can be pretty certain RabbitMQ isn't running on localhost port 80
- collector = RabbitMQCollector.builder()
- .connectionFactory(connectionFactory).addresses(asList("localhost:80")).build();
- }
-
- @Test public void checkFalseWhenRabbitMQIsDown() {
- CheckResult check = collector.check();
- assertThat(check.ok()).isFalse();
- assertThat(check.error()).isInstanceOf(UncheckedIOException.class);
- }
-
- @Test public void startFailsWhenRabbitMQIsDown() {
- // NOTE.. This is probably not good as it can crash on transient failure..
- assertThatThrownBy(collector::start)
- .isInstanceOf(UncheckedIOException.class)
- .hasMessageStartingWith("Unable to establish connection to RabbitMQ server");
- }
-
- /**
- * The {@code toString()} of {@link Component} implementations appear in health check endpoints.
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken
- * to ensure {@code toString()} output is a reasonable length and does not contain sensitive
- * information.
- */
- @Test public void toStringContainsOnlySummaryInformation() {
- assertThat(collector).hasToString(
- "RabbitMQCollector{addresses=[localhost:80], queue=zipkin}"
- );
- }
-}
diff --git a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java b/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java
deleted file mode 100644
index 3944e4ca137..00000000000
--- a/zipkin-collector/rabbitmq/src/test/java/zipkin2/collector/rabbitmq/RabbitMQExtension.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.rabbitmq;
-
-import java.time.Duration;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.BeforeAllCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-import org.opentest4j.TestAbortedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testcontainers.containers.Container.ExecResult;
-import org.testcontainers.containers.GenericContainer;
-import org.testcontainers.containers.wait.strategy.Wait;
-
-import static java.util.Arrays.asList;
-import static org.testcontainers.utility.DockerImageName.parse;
-import static zipkin2.Call.propagateIfFatal;
-
-class RabbitMQExtension implements BeforeAllCallback, AfterAllCallback {
- static final Logger LOGGER = LoggerFactory.getLogger(RabbitMQExtension.class);
- static final int RABBIT_PORT = 5672;
-
- RabbitMQContainer container = new RabbitMQContainer();
-
- @Override public void beforeAll(ExtensionContext context) {
- if (context.getRequiredTestClass().getEnclosingClass() != null) {
- // Only run once in outermost scope.
- return;
- }
-
- container.start();
- LOGGER.info("Using hostPort " + host() + ":" + port());
- }
-
- @Override public void afterAll(ExtensionContext context) {
- if (context.getRequiredTestClass().getEnclosingClass() != null) {
- // Only run once in outermost scope.
- return;
- }
-
- container.stop();
- }
-
- RabbitMQCollector.Builder newCollectorBuilder(String queue) {
- declareQueue(queue);
- return RabbitMQCollector.builder().queue(queue).addresses(asList(host() + ":" + port()));
- }
-
- void declareQueue(String queue) {
- ExecResult result;
- try {
- result = container.execInContainer("rabbitmqadmin", "declare", "queue", "name=" + queue);
- } catch (Throwable e) {
- propagateIfFatal(e);
- throw new TestAbortedException(
- "Couldn't declare queue " + queue + ": " + e.getMessage(), e);
- }
- if (result.getExitCode() != 0) {
- throw new TestAbortedException("Couldn't declare queue " + queue + ": " + result);
- }
- }
-
- String host() {
- return container.getHost();
- }
-
- int port() {
- return container.getMappedPort(RABBIT_PORT);
- }
-
- // mostly waiting for https://github.com/testcontainers/testcontainers-java/issues/3537
- static final class RabbitMQContainer extends GenericContainer {
- RabbitMQContainer() {
- super(parse("ghcr.io/openzipkin/rabbitmq-management-alpine:latest"));
- if ("true".equals(System.getProperty("docker.skip"))) {
- throw new TestAbortedException("${docker.skip} == true");
- }
- withExposedPorts(RABBIT_PORT); // rabbit's image doesn't expose any port
- waitStrategy = Wait.forLogMessage(".*Server startup complete.*", 1);
- withStartupTimeout(Duration.ofSeconds(60));
- }
- }
-}
diff --git a/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties b/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties
deleted file mode 100644
index 722851b466c..00000000000
--- a/zipkin-collector/rabbitmq/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,9 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
-
-# stop huge spam
-org.slf4j.simpleLogger.log.org.testcontainers.dockerclient=off
diff --git a/zipkin-collector/scribe/README.md b/zipkin-collector/scribe/README.md
deleted file mode 100644
index e9047717137..00000000000
--- a/zipkin-collector/scribe/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# collector-scribe
-
-## ScribeCollector
-This collector accepts Scribe logs in a specified category. Each log
-entry is expected to contain a single span, which is TBinaryProtocol
-big-endian, then base64 encoded. These spans are then pushed to storage.
-
-`zipkin2.collector.scribe.ScribeCollector.Builder` includes defaults that will
-listen on port 9410, accept log entries in the category "zipkin"
-
-## Encoding
-The scribe message is a TBinaryProtocol big-endian, then Base64 span.
-Base64 Basic and MIME schemes are supported.
-
-Here's what it looks like in pseudocode
-```
-serialized = writeTBinaryProtocol(span)
-encoded = base64(serialized)
-
-scribe.log(category = "zipkin", message = encoded)
-```
diff --git a/zipkin-collector/scribe/pom.xml b/zipkin-collector/scribe/pom.xml
deleted file mode 100644
index e3f0a24efcd..00000000000
--- a/zipkin-collector/scribe/pom.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin.zipkin2
- zipkin-collector-parent
- 2.24.4-SNAPSHOT
-
-
- zipkin-collector-scribe
- Collector: Scribe (Legacy)
-
-
- ${project.basedir}/../..
-
-
- -XepDisableWarningsInGeneratedCode
-
-
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- ${armeria.groupId}
- armeria-thrift0.15
- ${armeria.version}
-
-
-
-
- javax.annotation
- javax.annotation-api
- ${javax-annotation-api.version}
- provided
-
-
-
- ${armeria.groupId}
- armeria-junit4
- ${armeria.version}
- test
-
-
-
- org.awaitility
- awaitility
- ${awaitility.version}
- test
-
-
-
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java
deleted file mode 100644
index 0af84b1a680..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/NettyScribeServer.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import com.linecorp.armeria.common.CommonPools;
-import com.linecorp.armeria.common.util.EventLoopGroups;
-import io.netty.bootstrap.ServerBootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-import java.net.InetSocketAddress;
-
-import static zipkin2.Call.propagateIfFatal;
-
-final class NettyScribeServer {
- final int port;
- final ScribeSpanConsumer scribe;
-
- volatile EventLoopGroup bossGroup;
- volatile Channel channel;
-
- NettyScribeServer(int port, ScribeSpanConsumer scribe) {
- this.port = port;
- this.scribe = scribe;
- }
-
- void start() {
- bossGroup = EventLoopGroups.newEventLoopGroup(1);
- EventLoopGroup workerGroup = CommonPools.workerGroup();
-
- ServerBootstrap b = new ServerBootstrap();
- try {
- channel = b.group(bossGroup, workerGroup)
- .channel(EventLoopGroups.serverChannelType(bossGroup))
- .childHandler(new ChannelInitializer() {
- @Override protected void initChannel(SocketChannel ch) {
- ch.pipeline()
- .addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
- .addLast(new ScribeInboundHandler(scribe));
- }
- })
- .bind(port)
- .syncUninterruptibly()
- .channel();
- } catch (Throwable t) {
- propagateIfFatal(t);
- throw new RuntimeException("Could not start scribe server.", t);
- }
- }
-
- @SuppressWarnings("FutureReturnValueIgnored")
- void close() {
- if (channel == null) return;
- // TODO: chain these futures, and probably block a bit
- // https://line-armeria.slack.com/archives/C1NGPBUH2/p1591167918430500
- channel.close();
- bossGroup.shutdownGracefully();
- }
-
- boolean isRunning() {
- return channel != null && channel.isActive();
- }
-
- int port() {
- if (channel == null) return 0;
- return ((InetSocketAddress) channel.localAddress()).getPort();
- }
-}
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java
deleted file mode 100644
index af412941024..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeCollector.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import zipkin2.CheckResult;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorComponent;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.CollectorSampler;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-
-/**
- * This collector accepts Scribe logs in a specified category. Each log entry is expected to contain
- * a single span, which is TBinaryProtocol big-endian, then base64 encoded. These spans are chained
- * to an {@link SpanConsumer#accept asynchronous span consumer}.
- */
-public final class ScribeCollector extends CollectorComponent {
-
- public static Builder newBuilder() {
- return new Builder();
- }
-
- /** Configuration including defaults needed to receive spans from a Scribe category. */
- public static final class Builder extends CollectorComponent.Builder {
- Collector.Builder delegate = Collector.newBuilder(ScribeCollector.class);
- CollectorMetrics metrics = CollectorMetrics.NOOP_METRICS;
- String category = "zipkin";
- int port = 9410;
-
- @Override public Builder storage(StorageComponent storage) {
- delegate.storage(storage);
- return this;
- }
-
- @Override public Builder metrics(CollectorMetrics metrics) {
- if (metrics == null) throw new NullPointerException("metrics == null");
- this.metrics = metrics.forTransport("scribe");
- delegate.metrics(this.metrics);
- return this;
- }
-
- @Override public Builder sampler(CollectorSampler sampler) {
- delegate.sampler(sampler);
- return this;
- }
-
- /** Category zipkin spans will be consumed from. Defaults to "zipkin" */
- public Builder category(String category) {
- if (category == null) throw new NullPointerException("category == null");
- this.category = category;
- return this;
- }
-
- /** The port to listen on. Defaults to 9410 */
- public Builder port(int port) {
- this.port = port;
- return this;
- }
-
- @Override public ScribeCollector build() {
- return new ScribeCollector(this);
- }
- }
-
- final NettyScribeServer server;
-
- ScribeCollector(Builder builder) {
- server = new NettyScribeServer(builder.port,
- new ScribeSpanConsumer(builder.delegate.build(), builder.metrics, builder.category));
- }
-
- /** Will throw an exception if the {@link Builder#port(int) port} is already in use. */
- @Override public ScribeCollector start() {
- server.start();
- return this;
- }
-
- @Override public CheckResult check() {
- if (!server.isRunning()) {
- return CheckResult.failed(new IllegalStateException("server not running"));
- }
- return CheckResult.OK;
- }
-
- /** Returns zero until {@link #start()} was called. */
- public int port() {
- return server.port();
- }
-
- @Override public final String toString() {
- return "ScribeCollector{port=" + port() + ", category=" + server.scribe.category + "}";
- }
-
- @Override public void close() {
- server.close();
- }
-}
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java
deleted file mode 100644
index 12e63f889aa..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeInboundHandler.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import com.linecorp.armeria.common.HttpData;
-import com.linecorp.armeria.common.HttpHeaderNames;
-import com.linecorp.armeria.common.HttpMethod;
-import com.linecorp.armeria.common.HttpRequest;
-import com.linecorp.armeria.common.HttpResponse;
-import com.linecorp.armeria.common.RequestHeaders;
-import com.linecorp.armeria.common.util.Exceptions;
-import com.linecorp.armeria.common.util.SafeCloseable;
-import com.linecorp.armeria.server.ServiceRequestContext;
-import com.linecorp.armeria.server.ServiceRequestContextBuilder;
-import com.linecorp.armeria.server.thrift.THttpService;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelFutureListener;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
-import io.netty.channel.EventLoop;
-import java.util.HashMap;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static zipkin2.Call.propagateIfFatal;
-
-@SuppressWarnings("FutureReturnValueIgnored")
-// TODO: errorprone wants us to check futures before returning, but what would be a sensible check?
-// Say it is somehow canceled, would we take action? Would callback.onError() be redundant?
-final class ScribeInboundHandler extends ChannelInboundHandlerAdapter {
-
- static final Logger logger = LoggerFactory.getLogger(ScribeInboundHandler.class);
-
- // Headers mostly copied from https://github.com/apache/thrift/blob/master/lib/javame/src/org/apache/thrift/transport/THttpClient.java#L130
- static final RequestHeaders THRIFT_HEADERS = RequestHeaders.builder(
- HttpMethod.POST, "/internal/zipkin-thriftrpc")
- .set(HttpHeaderNames.CONTENT_TYPE, "application/x-thrift")
- .set(HttpHeaderNames.ACCEPT, "application/x-thrift")
- .set(HttpHeaderNames.USER_AGENT, "Zipkin/ScribeInboundHandler")
- .build();
-
- final THttpService scribeService;
-
- ScribeInboundHandler(ScribeSpanConsumer scribe) {
- scribeService = THttpService.of(scribe);
- }
-
- Map pendingResponses = new HashMap<>();
- int nextResponseIndex = 0;
- int previouslySentResponseIndex = -1;
-
- @Override public void channelRead(ChannelHandlerContext ctx, Object payload) {
- assert payload instanceof ByteBuf;
- HttpRequest request = HttpRequest.of(THRIFT_HEADERS, HttpData.wrap((ByteBuf) payload));
- ServiceRequestContextBuilder requestContextBuilder = ServiceRequestContext.builder(request)
- .service(scribeService)
- .alloc(ctx.alloc());
-
- if (ctx.executor() instanceof EventLoop) {
- requestContextBuilder.eventLoop((EventLoop) ctx.executor());
- }
-
- ServiceRequestContext requestContext = requestContextBuilder.build();
-
- final HttpResponse response;
- try (SafeCloseable unused = requestContext.push()) {
- response = HttpResponse.of(scribeService.serve(requestContext, request));
- } catch (Throwable t) {
- propagateIfFatal(t);
- exceptionCaught(ctx, t);
- return;
- }
-
- int responseIndex = nextResponseIndex++;
-
- response.aggregateWithPooledObjects(ctx.executor(), ctx.alloc()).handle((msg, t) -> {
- if (t != null) {
- exceptionCaught(ctx, t);
- return null;
- }
-
- try (HttpData content = msg.content()) {
- ByteBuf returned = ctx.alloc().buffer(content.length() + 4);
- returned.writeInt(content.length());
- returned.writeBytes(content.byteBuf());
- if (responseIndex == previouslySentResponseIndex + 1) {
- ctx.writeAndFlush(returned);
- previouslySentResponseIndex++;
-
- flushResponses(ctx);
- } else {
- pendingResponses.put(responseIndex, returned);
- }
- }
-
- return null;
- });
- }
-
- @Override public void channelInactive(ChannelHandlerContext ctx) {
- release();
- }
-
- @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- Exceptions.logIfUnexpected(logger, ctx.channel(), cause);
-
- release();
- closeOnFlush(ctx.channel());
- }
-
- void flushResponses(ChannelHandlerContext ctx) {
- while (!pendingResponses.isEmpty()) {
- ByteBuf response = pendingResponses.remove(previouslySentResponseIndex + 1);
- if (response == null) {
- return;
- }
-
- ctx.writeAndFlush(response);
- previouslySentResponseIndex++;
- }
- }
-
- void release() {
- pendingResponses.values().forEach(ByteBuf::release);
- pendingResponses.clear();
- }
-
- /**
- * Closes the specified channel after all queued write requests are flushed.
- */
- static void closeOnFlush(Channel ch) {
- if (ch.isActive()) {
- ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
- }
- }
-}
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java
deleted file mode 100644
index 2863a855a47..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/ScribeSpanConsumer.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import com.linecorp.armeria.common.CommonPools;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Base64;
-import java.util.List;
-import org.apache.thrift.async.AsyncMethodCallback;
-import zipkin2.Callback;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.scribe.generated.LogEntry;
-import zipkin2.collector.scribe.generated.ResultCode;
-import zipkin2.collector.scribe.generated.Scribe;
-
-final class ScribeSpanConsumer implements Scribe.AsyncIface {
- final Collector collector;
- final CollectorMetrics metrics;
- final String category;
-
- ScribeSpanConsumer(Collector collector, CollectorMetrics metrics, String category) {
- this.collector = collector;
- this.metrics = metrics;
- this.category = category;
- }
-
- @Override
- public void Log(List messages, AsyncMethodCallback resultHandler) {
- metrics.incrementMessages();
- List spans = new ArrayList<>();
- int byteCount = 0;
- try {
- for (LogEntry logEntry : messages) {
- if (!category.equals(logEntry.category)) continue;
- byte[] bytes = logEntry.message.getBytes(StandardCharsets.ISO_8859_1);
- bytes = Base64.getMimeDecoder().decode(bytes); // finagle-zipkin uses mime encoding
- byteCount += bytes.length;
- spans.add(SpanBytesDecoder.THRIFT.decodeOne(bytes));
- }
- } catch (RuntimeException e) {
- metrics.incrementMessagesDropped();
- resultHandler.onError(e);
- return;
- } finally {
- metrics.incrementBytes(byteCount);
- }
-
- collector.accept(spans, new Callback() {
- @Override public void onSuccess(Void value) {
- resultHandler.onComplete(ResultCode.OK);
- }
-
- @Override public void onError(Throwable t) {
- Exception error = t instanceof Exception ? (Exception) t : new RuntimeException(t);
- resultHandler.onError(error);
- }
- // Collectors may not be asynchronous so switch to blocking executor here.
- }, CommonPools.blockingTaskExecutor());
- }
-}
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java
deleted file mode 100644
index 395867f8f04..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/LogEntry.java
+++ /dev/null
@@ -1,482 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.12.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- * @generated
- */
-package zipkin2.collector.scribe.generated;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
-@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07")
-public class LogEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LogEntry");
-
- private static final org.apache.thrift.protocol.TField CATEGORY_FIELD_DESC = new org.apache.thrift.protocol.TField("category", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)2);
-
- private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new LogEntryStandardSchemeFactory();
- private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new LogEntryTupleSchemeFactory();
-
- public @org.apache.thrift.annotation.Nullable java.lang.String category; // required
- public @org.apache.thrift.annotation.Nullable java.lang.String message; // required
-
- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
- public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- CATEGORY((short)1, "category"),
- MESSAGE((short)2, "message");
-
- private static final java.util.Map byName = new java.util.HashMap();
-
- static {
- for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
- byName.put(field.getFieldName(), field);
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByThriftId(int fieldId) {
- switch(fieldId) {
- case 1: // CATEGORY
- return CATEGORY;
- case 2: // MESSAGE
- return MESSAGE;
- default:
- return null;
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, throwing an exception
- * if it is not found.
- */
- public static _Fields findByThriftIdOrThrow(int fieldId) {
- _Fields fields = findByThriftId(fieldId);
- if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
- return fields;
- }
-
- /**
- * Find the _Fields constant that matches name, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByName(java.lang.String name) {
- return byName.get(name);
- }
-
- private final short _thriftId;
- private final java.lang.String _fieldName;
-
- _Fields(short thriftId, java.lang.String fieldName) {
- _thriftId = thriftId;
- _fieldName = fieldName;
- }
-
- public short getThriftFieldId() {
- return _thriftId;
- }
-
- public java.lang.String getFieldName() {
- return _fieldName;
- }
- }
-
- // isset id assignments
- public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
- static {
- java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.CATEGORY, new org.apache.thrift.meta_data.FieldMetaData("category", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LogEntry.class, metaDataMap);
- }
-
- public LogEntry() {
- }
-
- public LogEntry(
- java.lang.String category,
- java.lang.String message)
- {
- this();
- this.category = category;
- this.message = message;
- }
-
- /**
- * Performs a deep copy on other.
- */
- public LogEntry(LogEntry other) {
- if (other.isSetCategory()) {
- this.category = other.category;
- }
- if (other.isSetMessage()) {
- this.message = other.message;
- }
- }
-
- public LogEntry deepCopy() {
- return new LogEntry(this);
- }
-
- @Override
- public void clear() {
- this.category = null;
- this.message = null;
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.lang.String getCategory() {
- return this.category;
- }
-
- public LogEntry setCategory(@org.apache.thrift.annotation.Nullable java.lang.String category) {
- this.category = category;
- return this;
- }
-
- public void unsetCategory() {
- this.category = null;
- }
-
- /** Returns true if field category is set (has been assigned a value) and false otherwise */
- public boolean isSetCategory() {
- return this.category != null;
- }
-
- public void setCategoryIsSet(boolean value) {
- if (!value) {
- this.category = null;
- }
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.lang.String getMessage() {
- return this.message;
- }
-
- public LogEntry setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) {
- this.message = message;
- return this;
- }
-
- public void unsetMessage() {
- this.message = null;
- }
-
- /** Returns true if field message is set (has been assigned a value) and false otherwise */
- public boolean isSetMessage() {
- return this.message != null;
- }
-
- public void setMessageIsSet(boolean value) {
- if (!value) {
- this.message = null;
- }
- }
-
- public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
- switch (field) {
- case CATEGORY:
- if (value == null) {
- unsetCategory();
- } else {
- setCategory((java.lang.String)value);
- }
- break;
-
- case MESSAGE:
- if (value == null) {
- unsetMessage();
- } else {
- setMessage((java.lang.String)value);
- }
- break;
-
- }
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.lang.Object getFieldValue(_Fields field) {
- switch (field) {
- case CATEGORY:
- return getCategory();
-
- case MESSAGE:
- return getMessage();
-
- }
- throw new java.lang.IllegalStateException();
- }
-
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
- public boolean isSet(_Fields field) {
- if (field == null) {
- throw new java.lang.IllegalArgumentException();
- }
-
- switch (field) {
- case CATEGORY:
- return isSetCategory();
- case MESSAGE:
- return isSetMessage();
- }
- throw new java.lang.IllegalStateException();
- }
-
- @Override
- public boolean equals(java.lang.Object that) {
- if (that == null)
- return false;
- if (that instanceof LogEntry)
- return this.equals((LogEntry)that);
- return false;
- }
-
- public boolean equals(LogEntry that) {
- if (that == null)
- return false;
- if (this == that)
- return true;
-
- boolean this_present_category = true && this.isSetCategory();
- boolean that_present_category = true && that.isSetCategory();
- if (this_present_category || that_present_category) {
- if (!(this_present_category && that_present_category))
- return false;
- if (!this.category.equals(that.category))
- return false;
- }
-
- boolean this_present_message = true && this.isSetMessage();
- boolean that_present_message = true && that.isSetMessage();
- if (this_present_message || that_present_message) {
- if (!(this_present_message && that_present_message))
- return false;
- if (!this.message.equals(that.message))
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- int hashCode = 1;
-
- hashCode = hashCode * 8191 + ((isSetCategory()) ? 131071 : 524287);
- if (isSetCategory())
- hashCode = hashCode * 8191 + category.hashCode();
-
- hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
- if (isSetMessage())
- hashCode = hashCode * 8191 + message.hashCode();
-
- return hashCode;
- }
-
- @Override
- public int compareTo(LogEntry other) {
- if (!getClass().equals(other.getClass())) {
- return getClass().getName().compareTo(other.getClass().getName());
- }
-
- int lastComparison = 0;
-
- lastComparison = java.lang.Boolean.valueOf(isSetCategory()).compareTo(other.isSetCategory());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetCategory()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.category, other.category);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- lastComparison = java.lang.Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetMessage()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- return 0;
- }
-
- @org.apache.thrift.annotation.Nullable
- public _Fields fieldForId(int fieldId) {
- return _Fields.findByThriftId(fieldId);
- }
-
- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
- scheme(iprot).read(iprot, this);
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
- scheme(oprot).write(oprot, this);
- }
-
- @Override
- public java.lang.String toString() {
- java.lang.StringBuilder sb = new java.lang.StringBuilder("LogEntry(");
- boolean first = true;
-
- sb.append("category:");
- if (this.category == null) {
- sb.append("null");
- } else {
- sb.append(this.category);
- }
- first = false;
- if (!first) sb.append(", ");
- sb.append("message:");
- if (this.message == null) {
- sb.append("null");
- } else {
- sb.append(this.message);
- }
- first = false;
- sb.append(")");
- return sb.toString();
- }
-
- public void validate() throws org.apache.thrift.TException {
- // check for required fields
- // check for sub-struct validity
- }
-
- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
- try {
- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
- try {
- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private static class LogEntryStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public LogEntryStandardScheme getScheme() {
- return new LogEntryStandardScheme();
- }
- }
-
- private static class LogEntryStandardScheme extends org.apache.thrift.scheme.StandardScheme {
-
- public void read(org.apache.thrift.protocol.TProtocol iprot, LogEntry struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TField schemeField;
- iprot.readStructBegin();
- while (true)
- {
- schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
- break;
- }
- switch (schemeField.id) {
- case 1: // CATEGORY
- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.category = iprot.readString();
- struct.setCategoryIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- case 2: // MESSAGE
- if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
- struct.message = iprot.readString();
- struct.setMessageIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- default:
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- iprot.readFieldEnd();
- }
- iprot.readStructEnd();
-
- // check for required fields of primitive type, which can't be checked in the validate method
- struct.validate();
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot, LogEntry struct) throws org.apache.thrift.TException {
- struct.validate();
-
- oprot.writeStructBegin(STRUCT_DESC);
- if (struct.category != null) {
- oprot.writeFieldBegin(CATEGORY_FIELD_DESC);
- oprot.writeString(struct.category);
- oprot.writeFieldEnd();
- }
- if (struct.message != null) {
- oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
- oprot.writeString(struct.message);
- oprot.writeFieldEnd();
- }
- oprot.writeFieldStop();
- oprot.writeStructEnd();
- }
-
- }
-
- private static class LogEntryTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public LogEntryTupleScheme getScheme() {
- return new LogEntryTupleScheme();
- }
- }
-
- private static class LogEntryTupleScheme extends org.apache.thrift.scheme.TupleScheme {
-
- @Override
- public void write(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet optionals = new java.util.BitSet();
- if (struct.isSetCategory()) {
- optionals.set(0);
- }
- if (struct.isSetMessage()) {
- optionals.set(1);
- }
- oprot.writeBitSet(optionals, 2);
- if (struct.isSetCategory()) {
- oprot.writeString(struct.category);
- }
- if (struct.isSetMessage()) {
- oprot.writeString(struct.message);
- }
- }
-
- @Override
- public void read(org.apache.thrift.protocol.TProtocol prot, LogEntry struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet incoming = iprot.readBitSet(2);
- if (incoming.get(0)) {
- struct.category = iprot.readString();
- struct.setCategoryIsSet(true);
- }
- if (incoming.get(1)) {
- struct.message = iprot.readString();
- struct.setMessageIsSet(true);
- }
- }
- }
-
- private static S scheme(org.apache.thrift.protocol.TProtocol proto) {
- return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
- }
-}
-
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java
deleted file mode 100644
index e7f65542c24..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/ResultCode.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.12.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- * @generated
- */
-package zipkin2.collector.scribe.generated;
-
-
-@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07")
-public enum ResultCode implements org.apache.thrift.TEnum {
- OK(0),
- TRY_LATER(1);
-
- private final int value;
-
- private ResultCode(int value) {
- this.value = value;
- }
-
- /**
- * Get the integer value of this enum value, as defined in the Thrift IDL.
- */
- public int getValue() {
- return value;
- }
-
- /**
- * Find a the enum type by its integer value, as defined in the Thrift IDL.
- * @return null if the value is not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static ResultCode findByValue(int value) {
- switch (value) {
- case 0:
- return OK;
- case 1:
- return TRY_LATER;
- default:
- return null;
- }
- }
-}
diff --git a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java b/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java
deleted file mode 100644
index 56be78c2195..00000000000
--- a/zipkin-collector/scribe/src/main/java/zipkin2/collector/scribe/generated/Scribe.java
+++ /dev/null
@@ -1,1045 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.12.0)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- * @generated
- */
-package zipkin2.collector.scribe.generated;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
-@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.12.0)", date = "2019-05-07")
-public class Scribe {
-
- public interface Iface {
-
- public ResultCode Log(java.util.List messages) throws org.apache.thrift.TException;
-
- }
-
- public interface AsyncIface {
-
- public void Log(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
-
- }
-
- public static class Client extends org.apache.thrift.TServiceClient implements Iface {
- public static class Factory implements org.apache.thrift.TServiceClientFactory {
- public Factory() {}
- public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
- return new Client(prot);
- }
- public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
- return new Client(iprot, oprot);
- }
- }
-
- public Client(org.apache.thrift.protocol.TProtocol prot)
- {
- super(prot, prot);
- }
-
- public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
- super(iprot, oprot);
- }
-
- public ResultCode Log(java.util.List messages) throws org.apache.thrift.TException
- {
- send_Log(messages);
- return recv_Log();
- }
-
- public void send_Log(java.util.List messages) throws org.apache.thrift.TException
- {
- Log_args args = new Log_args();
- args.setMessages(messages);
- sendBase("Log", args);
- }
-
- public ResultCode recv_Log() throws org.apache.thrift.TException
- {
- Log_result result = new Log_result();
- receiveBase(result, "Log");
- if (result.isSetSuccess()) {
- return result.success;
- }
- throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "Log failed: unknown result");
- }
-
- }
- public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
- public static class Factory implements org.apache.thrift.async.TAsyncClientFactory {
- private org.apache.thrift.async.TAsyncClientManager clientManager;
- private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
- public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
- this.clientManager = clientManager;
- this.protocolFactory = protocolFactory;
- }
- public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
- return new AsyncClient(protocolFactory, clientManager, transport);
- }
- }
-
- public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
- super(protocolFactory, clientManager, transport);
- }
-
- public void Log(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
- checkReady();
- Log_call method_call = new Log_call(messages, resultHandler, this, ___protocolFactory, ___transport);
- this.___currentMethod = method_call;
- ___manager.call(method_call);
- }
-
- public static class Log_call extends org.apache.thrift.async.TAsyncMethodCall {
- private java.util.List messages;
- public Log_call(java.util.List messages, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
- super(client, protocolFactory, transport, resultHandler, false);
- this.messages = messages;
- }
-
- public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
- prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("Log", org.apache.thrift.protocol.TMessageType.CALL, 0));
- Log_args args = new Log_args();
- args.setMessages(messages);
- args.write(prot);
- prot.writeMessageEnd();
- }
-
- public ResultCode getResult() throws org.apache.thrift.TException {
- if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
- throw new java.lang.IllegalStateException("Method call not finished!");
- }
- org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
- org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
- return (new Client(prot)).recv_Log();
- }
- }
-
- }
-
- public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor {
- private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(Processor.class.getName());
- public Processor(I iface) {
- super(iface, getProcessMap(new java.util.HashMap>()));
- }
-
- protected Processor(I iface, java.util.Map> processMap) {
- super(iface, getProcessMap(processMap));
- }
-
- private static java.util.Map> getProcessMap(java.util.Map> processMap) {
- processMap.put("Log", new Log());
- return processMap;
- }
-
- public static class Log extends org.apache.thrift.ProcessFunction {
- public Log() {
- super("Log");
- }
-
- public Log_args getEmptyArgsInstance() {
- return new Log_args();
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- @Override
- protected boolean rethrowUnhandledExceptions() {
- return false;
- }
-
- public Log_result getResult(I iface, Log_args args) throws org.apache.thrift.TException {
- Log_result result = new Log_result();
- result.success = iface.Log(args.messages);
- return result;
- }
- }
-
- }
-
- public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor {
- private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName());
- public AsyncProcessor(I iface) {
- super(iface, getProcessMap(new java.util.HashMap>()));
- }
-
- protected AsyncProcessor(I iface, java.util.Map> processMap) {
- super(iface, getProcessMap(processMap));
- }
-
- private static java.util.Map> getProcessMap(java.util.Map> processMap) {
- processMap.put("Log", new Log());
- return processMap;
- }
-
- public static class Log extends org.apache.thrift.AsyncProcessFunction {
- public Log() {
- super("Log");
- }
-
- public Log_args getEmptyArgsInstance() {
- return new Log_args();
- }
-
- public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new org.apache.thrift.async.AsyncMethodCallback() {
- public void onComplete(ResultCode o) {
- Log_result result = new Log_result();
- result.success = o;
- try {
- fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- } catch (org.apache.thrift.transport.TTransportException e) {
- _LOGGER.error("TTransportException writing to internal frame buffer", e);
- fb.close();
- } catch (java.lang.Exception e) {
- _LOGGER.error("Exception writing to internal frame buffer", e);
- onError(e);
- }
- }
- public void onError(java.lang.Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TSerializable msg;
- Log_result result = new Log_result();
- if (e instanceof org.apache.thrift.transport.TTransportException) {
- _LOGGER.error("TTransportException inside handler", e);
- fb.close();
- return;
- } else if (e instanceof org.apache.thrift.TApplicationException) {
- _LOGGER.error("TApplicationException inside handler", e);
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TApplicationException)e;
- } else {
- _LOGGER.error("Exception inside handler", e);
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- } catch (java.lang.Exception ex) {
- _LOGGER.error("Exception writing to internal frame buffer", ex);
- fb.close();
- }
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, Log_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
- iface.Log(args.messages,resultHandler);
- }
- }
-
- }
-
- public static class Log_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_args");
-
- private static final org.apache.thrift.protocol.TField MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("messages", org.apache.thrift.protocol.TType.LIST, (short)1);
-
- private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new Log_argsStandardSchemeFactory();
- private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new Log_argsTupleSchemeFactory();
-
- public @org.apache.thrift.annotation.Nullable java.util.List messages; // required
-
- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
- public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- MESSAGES((short)1, "messages");
-
- private static final java.util.Map byName = new java.util.HashMap();
-
- static {
- for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
- byName.put(field.getFieldName(), field);
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByThriftId(int fieldId) {
- switch(fieldId) {
- case 1: // MESSAGES
- return MESSAGES;
- default:
- return null;
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, throwing an exception
- * if it is not found.
- */
- public static _Fields findByThriftIdOrThrow(int fieldId) {
- _Fields fields = findByThriftId(fieldId);
- if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
- return fields;
- }
-
- /**
- * Find the _Fields constant that matches name, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByName(java.lang.String name) {
- return byName.get(name);
- }
-
- private final short _thriftId;
- private final java.lang.String _fieldName;
-
- _Fields(short thriftId, java.lang.String fieldName) {
- _thriftId = thriftId;
- _fieldName = fieldName;
- }
-
- public short getThriftFieldId() {
- return _thriftId;
- }
-
- public java.lang.String getFieldName() {
- return _fieldName;
- }
- }
-
- // isset id assignments
- public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
- static {
- java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("messages", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, LogEntry.class))));
- metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_args.class, metaDataMap);
- }
-
- public Log_args() {
- }
-
- public Log_args(
- java.util.List messages)
- {
- this();
- this.messages = messages;
- }
-
- /**
- * Performs a deep copy on other.
- */
- public Log_args(Log_args other) {
- if (other.isSetMessages()) {
- java.util.List __this__messages = new java.util.ArrayList(other.messages.size());
- for (LogEntry other_element : other.messages) {
- __this__messages.add(new LogEntry(other_element));
- }
- this.messages = __this__messages;
- }
- }
-
- public Log_args deepCopy() {
- return new Log_args(this);
- }
-
- @Override
- public void clear() {
- this.messages = null;
- }
-
- public int getMessagesSize() {
- return (this.messages == null) ? 0 : this.messages.size();
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.util.Iterator getMessagesIterator() {
- return (this.messages == null) ? null : this.messages.iterator();
- }
-
- public void addToMessages(LogEntry elem) {
- if (this.messages == null) {
- this.messages = new java.util.ArrayList();
- }
- this.messages.add(elem);
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.util.List getMessages() {
- return this.messages;
- }
-
- public Log_args setMessages(@org.apache.thrift.annotation.Nullable java.util.List messages) {
- this.messages = messages;
- return this;
- }
-
- public void unsetMessages() {
- this.messages = null;
- }
-
- /** Returns true if field messages is set (has been assigned a value) and false otherwise */
- public boolean isSetMessages() {
- return this.messages != null;
- }
-
- public void setMessagesIsSet(boolean value) {
- if (!value) {
- this.messages = null;
- }
- }
-
- public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
- switch (field) {
- case MESSAGES:
- if (value == null) {
- unsetMessages();
- } else {
- setMessages((java.util.List)value);
- }
- break;
-
- }
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.lang.Object getFieldValue(_Fields field) {
- switch (field) {
- case MESSAGES:
- return getMessages();
-
- }
- throw new java.lang.IllegalStateException();
- }
-
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
- public boolean isSet(_Fields field) {
- if (field == null) {
- throw new java.lang.IllegalArgumentException();
- }
-
- switch (field) {
- case MESSAGES:
- return isSetMessages();
- }
- throw new java.lang.IllegalStateException();
- }
-
- @Override
- public boolean equals(java.lang.Object that) {
- if (that == null)
- return false;
- if (that instanceof Log_args)
- return this.equals((Log_args)that);
- return false;
- }
-
- public boolean equals(Log_args that) {
- if (that == null)
- return false;
- if (this == that)
- return true;
-
- boolean this_present_messages = true && this.isSetMessages();
- boolean that_present_messages = true && that.isSetMessages();
- if (this_present_messages || that_present_messages) {
- if (!(this_present_messages && that_present_messages))
- return false;
- if (!this.messages.equals(that.messages))
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- int hashCode = 1;
-
- hashCode = hashCode * 8191 + ((isSetMessages()) ? 131071 : 524287);
- if (isSetMessages())
- hashCode = hashCode * 8191 + messages.hashCode();
-
- return hashCode;
- }
-
- @Override
- public int compareTo(Log_args other) {
- if (!getClass().equals(other.getClass())) {
- return getClass().getName().compareTo(other.getClass().getName());
- }
-
- int lastComparison = 0;
-
- lastComparison = java.lang.Boolean.valueOf(isSetMessages()).compareTo(other.isSetMessages());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetMessages()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messages, other.messages);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- return 0;
- }
-
- @org.apache.thrift.annotation.Nullable
- public _Fields fieldForId(int fieldId) {
- return _Fields.findByThriftId(fieldId);
- }
-
- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
- scheme(iprot).read(iprot, this);
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
- scheme(oprot).write(oprot, this);
- }
-
- @Override
- public java.lang.String toString() {
- java.lang.StringBuilder sb = new java.lang.StringBuilder("Log_args(");
- boolean first = true;
-
- sb.append("messages:");
- if (this.messages == null) {
- sb.append("null");
- } else {
- sb.append(this.messages);
- }
- first = false;
- sb.append(")");
- return sb.toString();
- }
-
- public void validate() throws org.apache.thrift.TException {
- // check for required fields
- // check for sub-struct validity
- }
-
- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
- try {
- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
- try {
- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private static class Log_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public Log_argsStandardScheme getScheme() {
- return new Log_argsStandardScheme();
- }
- }
-
- private static class Log_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
-
- public void read(org.apache.thrift.protocol.TProtocol iprot, Log_args struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TField schemeField;
- iprot.readStructBegin();
- while (true)
- {
- schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
- break;
- }
- switch (schemeField.id) {
- case 1: // MESSAGES
- if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
- {
- org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
- struct.messages = new java.util.ArrayList(_list0.size);
- @org.apache.thrift.annotation.Nullable LogEntry _elem1;
- for (int _i2 = 0; _i2 < _list0.size; ++_i2)
- {
- _elem1 = new LogEntry();
- _elem1.read(iprot);
- struct.messages.add(_elem1);
- }
- iprot.readListEnd();
- }
- struct.setMessagesIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- default:
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- iprot.readFieldEnd();
- }
- iprot.readStructEnd();
-
- // check for required fields of primitive type, which can't be checked in the validate method
- struct.validate();
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot, Log_args struct) throws org.apache.thrift.TException {
- struct.validate();
-
- oprot.writeStructBegin(STRUCT_DESC);
- if (struct.messages != null) {
- oprot.writeFieldBegin(MESSAGES_FIELD_DESC);
- {
- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.messages.size()));
- for (LogEntry _iter3 : struct.messages)
- {
- _iter3.write(oprot);
- }
- oprot.writeListEnd();
- }
- oprot.writeFieldEnd();
- }
- oprot.writeFieldStop();
- oprot.writeStructEnd();
- }
-
- }
-
- private static class Log_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public Log_argsTupleScheme getScheme() {
- return new Log_argsTupleScheme();
- }
- }
-
- private static class Log_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme {
-
- @Override
- public void write(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet optionals = new java.util.BitSet();
- if (struct.isSetMessages()) {
- optionals.set(0);
- }
- oprot.writeBitSet(optionals, 1);
- if (struct.isSetMessages()) {
- {
- oprot.writeI32(struct.messages.size());
- for (LogEntry _iter4 : struct.messages)
- {
- _iter4.write(oprot);
- }
- }
- }
- }
-
- @Override
- public void read(org.apache.thrift.protocol.TProtocol prot, Log_args struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet incoming = iprot.readBitSet(1);
- if (incoming.get(0)) {
- {
- org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.messages = new java.util.ArrayList(_list5.size);
- @org.apache.thrift.annotation.Nullable LogEntry _elem6;
- for (int _i7 = 0; _i7 < _list5.size; ++_i7)
- {
- _elem6 = new LogEntry();
- _elem6.read(iprot);
- struct.messages.add(_elem6);
- }
- }
- struct.setMessagesIsSet(true);
- }
- }
- }
-
- private static S scheme(org.apache.thrift.protocol.TProtocol proto) {
- return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
- }
- }
-
- public static class Log_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Log_result");
-
- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
-
- private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new Log_resultStandardSchemeFactory();
- private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new Log_resultTupleSchemeFactory();
-
- /**
- *
- * @see ResultCode
- */
- public @org.apache.thrift.annotation.Nullable ResultCode success; // required
-
- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
- public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- /**
- *
- * @see ResultCode
- */
- SUCCESS((short)0, "success");
-
- private static final java.util.Map byName = new java.util.HashMap();
-
- static {
- for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
- byName.put(field.getFieldName(), field);
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByThriftId(int fieldId) {
- switch(fieldId) {
- case 0: // SUCCESS
- return SUCCESS;
- default:
- return null;
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, throwing an exception
- * if it is not found.
- */
- public static _Fields findByThriftIdOrThrow(int fieldId) {
- _Fields fields = findByThriftId(fieldId);
- if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
- return fields;
- }
-
- /**
- * Find the _Fields constant that matches name, or null if its not found.
- */
- @org.apache.thrift.annotation.Nullable
- public static _Fields findByName(java.lang.String name) {
- return byName.get(name);
- }
-
- private final short _thriftId;
- private final java.lang.String _fieldName;
-
- _Fields(short thriftId, java.lang.String fieldName) {
- _thriftId = thriftId;
- _fieldName = fieldName;
- }
-
- public short getThriftFieldId() {
- return _thriftId;
- }
-
- public java.lang.String getFieldName() {
- return _fieldName;
- }
- }
-
- // isset id assignments
- public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
- static {
- java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResultCode.class)));
- metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Log_result.class, metaDataMap);
- }
-
- public Log_result() {
- }
-
- public Log_result(
- ResultCode success)
- {
- this();
- this.success = success;
- }
-
- /**
- * Performs a deep copy on other.
- */
- public Log_result(Log_result other) {
- if (other.isSetSuccess()) {
- this.success = other.success;
- }
- }
-
- public Log_result deepCopy() {
- return new Log_result(this);
- }
-
- @Override
- public void clear() {
- this.success = null;
- }
-
- /**
- *
- * @see ResultCode
- */
- @org.apache.thrift.annotation.Nullable
- public ResultCode getSuccess() {
- return this.success;
- }
-
- /**
- *
- * @see ResultCode
- */
- public Log_result setSuccess(@org.apache.thrift.annotation.Nullable ResultCode success) {
- this.success = success;
- return this;
- }
-
- public void unsetSuccess() {
- this.success = null;
- }
-
- /** Returns true if field success is set (has been assigned a value) and false otherwise */
- public boolean isSetSuccess() {
- return this.success != null;
- }
-
- public void setSuccessIsSet(boolean value) {
- if (!value) {
- this.success = null;
- }
- }
-
- public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
- switch (field) {
- case SUCCESS:
- if (value == null) {
- unsetSuccess();
- } else {
- setSuccess((ResultCode)value);
- }
- break;
-
- }
- }
-
- @org.apache.thrift.annotation.Nullable
- public java.lang.Object getFieldValue(_Fields field) {
- switch (field) {
- case SUCCESS:
- return getSuccess();
-
- }
- throw new java.lang.IllegalStateException();
- }
-
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
- public boolean isSet(_Fields field) {
- if (field == null) {
- throw new java.lang.IllegalArgumentException();
- }
-
- switch (field) {
- case SUCCESS:
- return isSetSuccess();
- }
- throw new java.lang.IllegalStateException();
- }
-
- @Override
- public boolean equals(java.lang.Object that) {
- if (that == null)
- return false;
- if (that instanceof Log_result)
- return this.equals((Log_result)that);
- return false;
- }
-
- public boolean equals(Log_result that) {
- if (that == null)
- return false;
- if (this == that)
- return true;
-
- boolean this_present_success = true && this.isSetSuccess();
- boolean that_present_success = true && that.isSetSuccess();
- if (this_present_success || that_present_success) {
- if (!(this_present_success && that_present_success))
- return false;
- if (!this.success.equals(that.success))
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- int hashCode = 1;
-
- hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
- if (isSetSuccess())
- hashCode = hashCode * 8191 + success.getValue();
-
- return hashCode;
- }
-
- @Override
- public int compareTo(Log_result other) {
- if (!getClass().equals(other.getClass())) {
- return getClass().getName().compareTo(other.getClass().getName());
- }
-
- int lastComparison = 0;
-
- lastComparison = java.lang.Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetSuccess()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- return 0;
- }
-
- @org.apache.thrift.annotation.Nullable
- public _Fields fieldForId(int fieldId) {
- return _Fields.findByThriftId(fieldId);
- }
-
- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
- scheme(iprot).read(iprot, this);
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
- scheme(oprot).write(oprot, this);
- }
-
- @Override
- public java.lang.String toString() {
- java.lang.StringBuilder sb = new java.lang.StringBuilder("Log_result(");
- boolean first = true;
-
- sb.append("success:");
- if (this.success == null) {
- sb.append("null");
- } else {
- sb.append(this.success);
- }
- first = false;
- sb.append(")");
- return sb.toString();
- }
-
- public void validate() throws org.apache.thrift.TException {
- // check for required fields
- // check for sub-struct validity
- }
-
- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
- try {
- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
- try {
- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private static class Log_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public Log_resultStandardScheme getScheme() {
- return new Log_resultStandardScheme();
- }
- }
-
- private static class Log_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme {
-
- public void read(org.apache.thrift.protocol.TProtocol iprot, Log_result struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TField schemeField;
- iprot.readStructBegin();
- while (true)
- {
- schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
- break;
- }
- switch (schemeField.id) {
- case 0: // SUCCESS
- if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
- struct.success = ResultCode.findByValue(iprot.readI32());
- struct.setSuccessIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- default:
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- iprot.readFieldEnd();
- }
- iprot.readStructEnd();
-
- // check for required fields of primitive type, which can't be checked in the validate method
- struct.validate();
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot, Log_result struct) throws org.apache.thrift.TException {
- struct.validate();
-
- oprot.writeStructBegin(STRUCT_DESC);
- if (struct.success != null) {
- oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
- oprot.writeI32(struct.success.getValue());
- oprot.writeFieldEnd();
- }
- oprot.writeFieldStop();
- oprot.writeStructEnd();
- }
-
- }
-
- private static class Log_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
- public Log_resultTupleScheme getScheme() {
- return new Log_resultTupleScheme();
- }
- }
-
- private static class Log_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme {
-
- @Override
- public void write(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet optionals = new java.util.BitSet();
- if (struct.isSetSuccess()) {
- optionals.set(0);
- }
- oprot.writeBitSet(optionals, 1);
- if (struct.isSetSuccess()) {
- oprot.writeI32(struct.success.getValue());
- }
- }
-
- @Override
- public void read(org.apache.thrift.protocol.TProtocol prot, Log_result struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
- java.util.BitSet incoming = iprot.readBitSet(1);
- if (incoming.get(0)) {
- struct.success = ResultCode.findByValue(iprot.readI32());
- struct.setSuccessIsSet(true);
- }
- }
- }
-
- private static S scheme(org.apache.thrift.protocol.TProtocol proto) {
- return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
- }
- }
-
-}
diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java
deleted file mode 100644
index d6cc50c27cc..00000000000
--- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ITScribeCollector.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright 2015-2021 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import com.linecorp.armeria.common.CommonPools;
-import java.util.Base64;
-import java.util.List;
-import java.util.stream.Collectors;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.layered.TFramedTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-import zipkin2.Callback;
-import zipkin2.Span;
-import zipkin2.TestObjects;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.collector.scribe.generated.LogEntry;
-import zipkin2.collector.scribe.generated.ResultCode;
-import zipkin2.collector.scribe.generated.Scribe;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-class ITScribeCollector {
- static Collector collector;
- static CollectorMetrics metrics;
- static NettyScribeServer server;
-
- @BeforeAll static void startServer() {
- collector = mock(Collector.class);
- doAnswer(invocation -> {
- Callback callback = invocation.getArgument(1);
- callback.onSuccess(null);
- return null;
- }).when(collector).accept(any(), any(), any());
-
- metrics = mock(CollectorMetrics.class);
-
- server = new NettyScribeServer(0, new ScribeSpanConsumer(collector, metrics, "zipkin"));
- server.start();
- }
-
- @AfterAll static void stopServer() {
- server.close();
- }
-
- @Test void normal() throws Exception {
- // Java version of this sample code
- // https://github.com/facebookarchive/scribe/wiki/Logging-Messages
- TTransport transport = new TFramedTransport(new TSocket("localhost", server.port()));
- TProtocol protocol = new TBinaryProtocol(transport, false, false);
- Scribe.Iface client = new Scribe.Client(protocol);
-
- List entries = TestObjects.TRACE.stream()
- .map(ITScribeCollector::logEntry)
- .collect(Collectors.toList());
-
- transport.open();
- try {
- ResultCode code = client.Log(entries);
- assertThat(code).isEqualTo(ResultCode.OK);
-
- code = client.Log(entries);
- assertThat(code).isEqualTo(ResultCode.OK);
- } finally {
- transport.close();
- }
-
- verify(collector, times(2)).accept(eq(TestObjects.TRACE), any(),
- eq(CommonPools.blockingTaskExecutor()));
- verify(metrics, times(2)).incrementMessages();
- }
-
- static LogEntry logEntry(Span span) {
- return new LogEntry()
- .setCategory("zipkin")
- .setMessage(Base64.getMimeEncoder().encodeToString(SpanBytesEncoder.THRIFT.encode(span)));
- }
-}
diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java
deleted file mode 100644
index e950ed324ad..00000000000
--- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeCollectorTest.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import org.junit.jupiter.api.Test;
-import zipkin2.CheckResult;
-import zipkin2.Component;
-import zipkin2.storage.InMemoryStorage;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-
-class ScribeCollectorTest {
- InMemoryStorage storage = InMemoryStorage.newBuilder().build();
-
- @Test void check_failsWhenNotStarted() {
- try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) {
-
- CheckResult result = scribe.check();
- assertThat(result.ok()).isFalse();
- assertThat(result.error()).isInstanceOf(IllegalStateException.class);
-
- scribe.start();
- assertThat(scribe.check().ok()).isTrue();
- }
- }
-
- @Test void anonymousPort() {
- try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) {
-
- assertThat(scribe.port()).isZero();
-
- scribe.start();
- assertThat(scribe.port()).isNotZero();
- }
- }
-
- @Test void start_failsWhenCantBindPort() {
- ScribeCollector.Builder builder = ScribeCollector.newBuilder().storage(storage).port(0);
-
- try (ScribeCollector first = builder.build().start()) {
- try (ScribeCollector samePort = builder.port(first.port()).build()) {
- assertThatThrownBy(samePort::start)
- .isInstanceOf(RuntimeException.class)
- .hasMessage("Could not start scribe server.");
- }
- }
- }
-
- /**
- * The {@code toString()} of {@link Component} implementations appear in health check endpoints.
- * Since these are likely to be exposed in logs and other monitoring tools, care should be taken
- * to ensure {@code toString()} output is a reasonable length and does not contain sensitive
- * information.
- */
- @Test void toStringContainsOnlySummaryInformation() {
- try (ScribeCollector scribe = ScribeCollector.newBuilder().storage(storage).port(0).build()) {
-
- assertThat(scribe.start())
- .hasToString("ScribeCollector{port=" + scribe.port() + ", category=zipkin}");
- }
- }
-}
diff --git a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java b/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java
deleted file mode 100644
index 1d0ccc493d6..00000000000
--- a/zipkin-collector/scribe/src/test/java/zipkin2/collector/scribe/ScribeSpanConsumerTest.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.collector.scribe;
-
-import java.util.Arrays;
-import java.util.Base64;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.junit.jupiter.api.Test;
-import zipkin2.Call;
-import zipkin2.Callback;
-import zipkin2.Endpoint;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.collector.scribe.generated.LogEntry;
-import zipkin2.collector.scribe.generated.ResultCode;
-import zipkin2.storage.ForwardingStorageComponent;
-import zipkin2.storage.InMemoryStorage;
-import zipkin2.storage.SpanConsumer;
-import zipkin2.storage.StorageComponent;
-import zipkin2.v1.V1Span;
-import zipkin2.v1.V1SpanConverter;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.awaitility.Awaitility.await;
-
-class ScribeSpanConsumerTest {
- // scope to scribe as we aren't creating the consumer with the builder.
- InMemoryCollectorMetrics scribeMetrics = new InMemoryCollectorMetrics().forTransport("scribe");
-
- InMemoryStorage storage = InMemoryStorage.newBuilder().build();
- SpanConsumer consumer = storage.spanConsumer();
-
- static class CaptureAsyncMethodCallback implements AsyncMethodCallback {
-
- ResultCode resultCode;
- Exception error;
-
- CountDownLatch latch = new CountDownLatch(1);
-
- @Override public void onComplete(ResultCode resultCode) {
- this.resultCode = resultCode;
- latch.countDown();
- }
-
- @Override public void onError(Exception error) {
- this.error = error;
- latch.countDown();
- }
- }
-
- static String reallyLongAnnotation;
-
- static {
- char[] as = new char[2048];
- Arrays.fill(as, 'a');
- reallyLongAnnotation = new String(as);
- }
-
- Endpoint zipkinQuery =
- Endpoint.newBuilder().serviceName("zipkin-query").ip("127.0.0.1").port(9411).build();
- Endpoint zipkinQuery0 = zipkinQuery.toBuilder().port(null).build();
-
- V1Span v1 = V1Span.newBuilder()
- .traceId(-6054243957716233329L)
- .name("getTracesByIds")
- .id(-3615651937927048332L)
- .parentId(-6054243957716233329L)
- .addAnnotation(1442493420635000L, "sr", zipkinQuery)
- .addAnnotation(1442493420747000L, reallyLongAnnotation, zipkinQuery)
- .addAnnotation(
- 1442493422583586L,
- "Gc(9,0.PSScavenge,2015-09-17 12:37:02 +0000,304.milliseconds+762.microseconds)",
- zipkinQuery)
- .addAnnotation(1442493422680000L, "ss", zipkinQuery)
- .addBinaryAnnotation("srv/finagle.version", "6.28.0", zipkinQuery0)
- .addBinaryAnnotation("sa", zipkinQuery)
- .addBinaryAnnotation("ca", zipkinQuery.toBuilder().port(63840).build())
- .debug(false)
- .build();
-
- Span v2 = V1SpanConverter.create().convert(v1).get(0);
- byte[] bytes = SpanBytesEncoder.THRIFT.encode(v2);
- String encodedSpan = new String(Base64.getEncoder().encode(bytes), UTF_8);
-
- @Test void entriesWithSpansAreConsumed() throws Exception {
- ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer);
-
- LogEntry entry = new LogEntry();
- entry.category = "zipkin";
- entry.message = encodedSpan;
-
- expectSuccess(scribe, entry);
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(storage.getTraces()).containsExactly(asList(v2)));
-
- assertThat(scribeMetrics.messages()).isEqualTo(1);
- assertThat(scribeMetrics.messagesDropped()).isZero();
- assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length);
- assertThat(scribeMetrics.spans()).isEqualTo(1);
- assertThat(scribeMetrics.spansDropped()).isZero();
- }
-
- @Test void entriesWithoutSpansAreSkipped() throws Exception {
- SpanConsumer consumer = (callback) -> {
- throw new AssertionError(); // as we shouldn't get here.
- };
-
- ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer);
-
- LogEntry entry = new LogEntry();
- entry.category = "notzipkin";
- entry.message = "hello world";
-
- expectSuccess(scribe, entry);
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1));
- assertThat(scribeMetrics.messagesDropped()).isZero();
- assertThat(scribeMetrics.bytes()).isZero();
- assertThat(scribeMetrics.spans()).isZero();
- assertThat(scribeMetrics.spansDropped()).isZero();
- }
-
- private void expectSuccess(ScribeSpanConsumer scribe, LogEntry entry) throws Exception {
- CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback();
- scribe.Log(asList(entry), callback);
- callback.latch.await(10, TimeUnit.SECONDS);
- assertThat(callback.resultCode).isEqualTo(ResultCode.OK);
- }
-
- @Test void malformedDataIsDropped() {
- ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer);
-
- LogEntry entry = new LogEntry();
- entry.category = "zipkin";
- entry.message = "notbase64";
-
- CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback();
- scribe.Log(asList(entry), callback);
- assertThat(callback.error).isInstanceOf(IllegalArgumentException.class);
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1));
- assertThat(scribeMetrics.messagesDropped()).isEqualTo(1);
- assertThat(scribeMetrics.bytes()).isZero();
- assertThat(scribeMetrics.spans()).isZero();
- assertThat(scribeMetrics.spansDropped()).isZero();
- }
-
- @Test void consumerExceptionBeforeCallbackDoesntSetFutureException() {
- consumer = (input) -> {
- throw new NullPointerException("endpoint was null");
- };
-
- ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer);
-
- LogEntry entry = new LogEntry();
- entry.category = "zipkin";
- entry.message = encodedSpan;
-
- CaptureAsyncMethodCallback callback = new CaptureAsyncMethodCallback();
- scribe.Log(asList(entry), callback);
-
- // Storage related exceptions are not propagated to the caller. Only marshalling ones are.
- assertThat(callback.error).isNull();
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1));
- assertThat(scribeMetrics.messagesDropped()).isZero();
- assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length);
- assertThat(scribeMetrics.spans()).isEqualTo(1);
- assertThat(scribeMetrics.spansDropped()).isEqualTo(1);
- }
-
- /**
- * Callbacks are performed asynchronously. If they throw, it hints that we are chaining futures
- * when we shouldn't
- */
- @Test void callbackExceptionDoesntThrow() throws Exception {
- consumer = (input) -> new Call.Base() {
- @Override protected Void doExecute() {
- throw new AssertionError();
- }
-
- @Override protected void doEnqueue(Callback callback) {
- callback.onError(new NullPointerException());
- }
-
- @Override public Call clone() {
- throw new AssertionError();
- }
- };
-
- ScribeSpanConsumer scribe = newScribeSpanConsumer("zipkin", consumer);
-
- LogEntry entry = new LogEntry();
- entry.category = "zipkin";
- entry.message = encodedSpan;
-
- expectSuccess(scribe, entry);
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(scribeMetrics.messages()).isEqualTo(1));
- assertThat(scribeMetrics.messagesDropped()).isZero();
- assertThat(scribeMetrics.bytes()).isEqualTo(bytes.length);
- assertThat(scribeMetrics.spans()).isEqualTo(1);
- assertThat(scribeMetrics.spansDropped()).isEqualTo(1);
- }
-
- /** Finagle's zipkin tracer breaks on a column width with a trailing newline */
- @Test void decodesSpanGeneratedByFinagle() throws Exception {
- LogEntry entry = new LogEntry();
- entry.category = "zipkin";
- entry.message = ""
- + "CgABq/sBMnzE048LAAMAAAAOZ2V0VHJhY2VzQnlJZHMKAATN0p+4EGfTdAoABav7ATJ8xNOPDwAGDAAAAAQKAAEABR/wq+2DeAsAAgAAAAJzcgwAAwgAAX8AAAEGAAIkwwsAAwAAAAx6aXBraW4tcXVlcnkAAAoAAQAFH/Cr7zj4CwACAAAIAGFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWF
hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFh\n"
- + "YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAACgABAAUf8KwLPyILAAIAAABOR2MoOSwwLlBTU2NhdmVuZ2UsMjAxNS0wOS0xNyAxMjozNzowMiArMDAwMCwzMDQubWlsbGlzZWNvbmRzKzc2Mi5taWNyb3NlY29uZHMpDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAIAAQABKZ6AAoAAQAFH/CsDLfACwACAAAAAnNzDAADCAABfwAAAQYAAiTDCwADAAAADHppcGtpbi1xdWVyeQAADwAIDAAAAAULAAEAAAATc3J2L2ZpbmFnbGUudmVyc2lvbgsAAgAAAAY2LjI4LjAIAAMAAAAGDAAECAABfwAAAQYAAgAACwADAAAADHppcGtpbi1xdWVyeQAACwABAAAAD3Nydi9tdXgvZW5hYmxlZAsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAIAAAsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAJzYQsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAIkwwsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAJjYQsAAgAAAAEBCAADAAAAAAwABAgAAX8AAAEGAAL5YAsAAwAAAAx6aXBraW4tcXVlcnkAAAsAAQAAAAZudW1JZHMLAAIAAAAEAAAAAQgAAwAAAAMMAAQIAAF/AAABBgACJMMLAAMAAAAMemlwa2luLXF1ZXJ5AAACAAkAAA==\n";
-
- ScribeSpanConsumer scribe = newScribeSpanConsumer(entry.category, consumer);
-
- expectSuccess(scribe, entry);
-
- // Storage finishes after callback so wait for it.
- await().untilAsserted(() -> assertThat(storage.getTraces()).containsExactly(asList(v2)));
-
- assertThat(scribeMetrics.messages()).isEqualTo(1);
- assertThat(scribeMetrics.messagesDropped()).isZero();
- assertThat(scribeMetrics.bytes())
- .isEqualTo(Base64.getMimeDecoder().decode(entry.message).length);
- assertThat(scribeMetrics.spans()).isEqualTo(1);
- assertThat(scribeMetrics.spansDropped()).isZero();
- }
-
- ScribeSpanConsumer newScribeSpanConsumer(String category, SpanConsumer spanConsumer) {
- ScribeCollector.Builder builder = ScribeCollector.newBuilder()
- .category(category)
- .metrics(scribeMetrics)
- .storage(new ForwardingStorageComponent() {
- @Override protected StorageComponent delegate() {
- throw new AssertionError();
- }
-
- @Override public SpanConsumer spanConsumer() {
- return spanConsumer;
- }
- });
- return new ScribeSpanConsumer(
- builder.delegate.build(),
- builder.metrics,
- builder.category);
- }
-}
diff --git a/zipkin-collector/scribe/src/test/resources/simplelogger.properties b/zipkin-collector/scribe/src/test/resources/simplelogger.properties
deleted file mode 100644
index 54c0b32d99d..00000000000
--- a/zipkin-collector/scribe/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
-org.slf4j.simpleLogger.log.zipkin2.collector.scribe=debug
diff --git a/zipkin-junit/README.md b/zipkin-junit/README.md
deleted file mode 100644
index 39fc742030b..00000000000
--- a/zipkin-junit/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# zipkin-junit
-
-This contains `ZipkinRule`, a JUnit rule to spin-up a Zipkin server during tests.
-
-ZipkinRule aims to emulate a http collector. For example, it presents
-the v1 and v2 POST apis [Zipkin Api](http://openzipkin.github.io/zipkin-api/#/), and
-supports features like gzip compression.
-
-Usage
-------
-
-For example, you can write micro-integration tests like so:
-
-```java
-@Rule
-public ZipkinRule zipkin = new ZipkinRule();
-
-// Pretend we have a traced system under test
-TracedService service = new TracedService(zipkin.httpUrl(), ReportingMode.FLUSH_EVERY);
-
-@Test
-public void skipsReportingWhenNotSampled() throws IOException {
- zipkin.storeSpans(asList(rootSpan));
-
- // send a request to the instrumented server, telling it not to sample.
- client.addHeader("X-B3-TraceId", rootSpan.traceId)
- .addHeader("X-B3-SpanId", rootSpan.id)
- .addHeader("X-B3-Sampled", 0).get(service.httpUrl());
-
- // check that zipkin didn't receive any new data in that trace
- assertThat(zipkin.getTraces()).containsOnly(asList(rootSpan));
-}
-```
-
-You can also simulate failures.
-
-For example, if you want to ensure your instrumentation doesn't retry on http 400.
-
-```java
-@Test
-public void doesntAttemptToRetryOn400() throws IOException {
- zipkin.enqueueFailure(sendErrorResponse(400, "Invalid Format"));
-
- reporter.record(span);
- reporter.flush();
-
- // check that we didn't retry on 400
- assertThat(zipkin.httpRequestCount()).isEqualTo(1);
-}
-```
-
-Besides `httpRequestCount()`, there are two other counters that can
-help you assert instrumentation is doing what you think:
-
-* `collectorMetrics()` - How many spans or bytes were collected on the http transport.
-
-These counters can validate aspects such if you are grouping spans by id
-before reporting them to the server.
diff --git a/zipkin-junit/pom.xml b/zipkin-junit/pom.xml
deleted file mode 100644
index 3f72a95470a..00000000000
--- a/zipkin-junit/pom.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin
- zipkin-parent
- 2.24.4-SNAPSHOT
-
-
- io.zipkin.zipkin2
- zipkin-junit
- Zipkin JUnit
- JUnit rule to spin-up a Zipkin server during tests
-
-
- ${project.basedir}/..
-
-
-
-
- ${project.groupId}
- zipkin
- ${project.version}
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- com.squareup.okhttp3
- mockwebserver
- ${okhttp.version}
-
-
-
- junit
- junit
- ${junit.version}
-
-
-
-
- org.slf4j
- jul-to-slf4j
- ${slf4j.version}
- test
-
-
- org.slf4j
- slf4j-simple
- ${slf4j.version}
- test
-
-
-
-
- com.google.code.gson
- gson
- ${gson.version}
- test
-
-
- ${project.groupId}
- zipkin-tests
- ${project.version}
- test
-
-
-
diff --git a/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java b/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java
deleted file mode 100644
index 84b2a5c5af6..00000000000
--- a/zipkin-junit/src/main/java/zipkin2/junit/HttpFailure.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit;
-
-import okhttp3.mockwebserver.MockResponse;
-
-import static okhttp3.mockwebserver.SocketPolicy.DISCONNECT_DURING_REQUEST_BODY;
-
-/**
- * Instrumentation that use {@code POST} endpoints need to survive failures. Besides simply not
- * starting the zipkin server, you can enqueue failures like this to test edge cases. For example,
- * that you log a failure when a 400 code is returned.
- */
-public final class HttpFailure {
-
- /** Ex a network partition occurs in the middle of the POST request */
- public static HttpFailure disconnectDuringBody() {
- return new HttpFailure(new MockResponse().setSocketPolicy(DISCONNECT_DURING_REQUEST_BODY));
- }
-
- /** Ex code 400 when the server cannot read the spans */
- public static HttpFailure sendErrorResponse(int code, String body) {
- return new HttpFailure(new MockResponse().setResponseCode(code).setBody(body));
- }
-
- /** Not exposed publicly in order to not leak okhttp3 types. */
- final MockResponse response;
-
- HttpFailure(MockResponse response) {
- this.response = response;
- }
-}
diff --git a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java b/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java
deleted file mode 100644
index 97082eda087..00000000000
--- a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinDispatcher.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit;
-
-import java.io.IOException;
-import okhttp3.HttpUrl;
-import okhttp3.mockwebserver.Dispatcher;
-import okhttp3.mockwebserver.MockResponse;
-import okhttp3.mockwebserver.MockWebServer;
-import okhttp3.mockwebserver.RecordedRequest;
-import okio.Buffer;
-import okio.GzipSource;
-import zipkin2.Callback;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.storage.StorageComponent;
-
-final class ZipkinDispatcher extends Dispatcher {
- private final Collector consumer;
- private final CollectorMetrics metrics;
- private final MockWebServer server;
-
- ZipkinDispatcher(StorageComponent storage, CollectorMetrics metrics, MockWebServer server) {
- this.consumer = Collector.newBuilder(getClass()).storage(storage).metrics(metrics).build();
- this.metrics = metrics;
- this.server = server;
- }
-
- @Override
- public MockResponse dispatch(RecordedRequest request) {
- HttpUrl url = server.url(request.getPath());
- if (request.getMethod().equals("POST")) {
- String type = request.getHeader("Content-Type");
- if (url.encodedPath().equals("/api/v1/spans")) {
- SpanBytesDecoder decoder =
- type != null && type.contains("/x-thrift")
- ? SpanBytesDecoder.THRIFT
- : SpanBytesDecoder.JSON_V1;
- return acceptSpans(request, decoder);
- } else if (url.encodedPath().equals("/api/v2/spans")) {
- SpanBytesDecoder decoder =
- type != null && type.contains("/x-protobuf")
- ? SpanBytesDecoder.PROTO3
- : SpanBytesDecoder.JSON_V2;
- return acceptSpans(request, decoder);
- }
- } else { // unsupported method
- return new MockResponse().setResponseCode(405);
- }
- return new MockResponse().setResponseCode(404);
- }
-
- MockResponse acceptSpans(RecordedRequest request, SpanBytesDecoder decoder) {
- byte[] body = request.getBody().readByteArray();
- metrics.incrementMessages();
- String encoding = request.getHeader("Content-Encoding");
- if (encoding != null && encoding.contains("gzip")) {
- try {
- Buffer result = new Buffer();
- GzipSource source = new GzipSource(new Buffer().write(body));
- while (source.read(result, Integer.MAX_VALUE) != -1) ;
- body = result.readByteArray();
- } catch (IOException e) {
- metrics.incrementMessagesDropped();
- return new MockResponse().setResponseCode(400).setBody("Cannot gunzip spans");
- }
- }
- metrics.incrementBytes(body.length);
-
- final MockResponse result = new MockResponse();
- if (body.length == 0) return result.setResponseCode(202); // lenient on empty
-
- consumer.acceptSpans(body, decoder, new Callback() {
- @Override public void onSuccess(Void value) {
- result.setResponseCode(202);
- }
-
- @Override public void onError(Throwable t) {
- String message = t.getMessage();
- result.setBody(message).setResponseCode(message.startsWith("Cannot store") ? 500 : 400);
- }
- });
- return result;
- }
-}
diff --git a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java b/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java
deleted file mode 100644
index c5981f9fce7..00000000000
--- a/zipkin-junit/src/main/java/zipkin2/junit/ZipkinRule.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2015-2020 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import okhttp3.mockwebserver.Dispatcher;
-import okhttp3.mockwebserver.MockResponse;
-import okhttp3.mockwebserver.MockWebServer;
-import okhttp3.mockwebserver.RecordedRequest;
-import org.junit.rules.TestRule;
-import org.junit.runner.Description;
-import org.junit.runners.model.Statement;
-import zipkin2.DependencyLink;
-import zipkin2.Span;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.internal.Nullable;
-import zipkin2.storage.InMemoryStorage;
-
-import static okhttp3.mockwebserver.SocketPolicy.KEEP_OPEN;
-
-/**
- * Starts up a local Zipkin server, listening for http requests on {@link #httpUrl}.
- *
- *
This can be used to test instrumentation. For example, you can POST spans directly to this
- * server.
- *
- *
See http://openzipkin.github.io/zipkin-api/#/
- */
-public final class ZipkinRule implements TestRule {
- private final InMemoryStorage storage = InMemoryStorage.newBuilder().build();
- private final InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics();
- private final MockWebServer server = new MockWebServer();
- private final BlockingQueue failureQueue = new LinkedBlockingQueue<>();
- private final AtomicInteger receivedSpanBytes = new AtomicInteger();
-
- public ZipkinRule() {
- final ZipkinDispatcher successDispatch = new ZipkinDispatcher(storage, metrics, server);
- Dispatcher dispatcher = new Dispatcher() {
- @Override public MockResponse dispatch(RecordedRequest request) {
- MockResponse maybeFailure = failureQueue.poll();
- if (maybeFailure != null) return maybeFailure;
- MockResponse result = successDispatch.dispatch(request);
- if (request.getMethod().equals("POST")) {
- receivedSpanBytes.addAndGet((int) request.getBodySize());
- }
- return result;
- }
-
- @Override public MockResponse peek() {
- MockResponse maybeFailure = failureQueue.peek();
- if (maybeFailure != null) return maybeFailure.clone();
- return new MockResponse().setSocketPolicy(KEEP_OPEN);
- }
- };
- server.setDispatcher(dispatcher);
- }
-
- /** Use this to connect. The zipkin v1 interface will be under "/api/v1" */
- public String httpUrl() {
- return String.format("http://%s:%s", server.getHostName(), server.getPort());
- }
-
- /** Use this to see how many requests you've sent to any zipkin http endpoint. */
- public int httpRequestCount() {
- return server.getRequestCount();
- }
-
- /** Use this to see how many spans or serialized bytes were collected on the http endpoint. */
- public InMemoryCollectorMetrics collectorMetrics() {
- return metrics;
- }
-
- /**
- * Stores the given spans directly, to setup preconditions for a test.
- *
- *
For example, if you are testing what happens when instrumentation adds a child to a trace,
- * you'd add the parent here.
- */
- public ZipkinRule storeSpans(List spans) {
- try {
- storage.accept(spans).execute();
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- return this;
- }
-
- /**
- * Adds a one-time failure to the http endpoint.
- *
- *
Ex. If you want to test that you don't repeatedly send bad data, you could send a 400 back.
- *
- *
- *
- * @param failure type of failure the next call to the http endpoint responds with
- */
- public ZipkinRule enqueueFailure(HttpFailure failure) {
- failureQueue.add(failure.response);
- return this;
- }
-
- /** Retrieves all traces this zipkin server has received. */
- public List> getTraces() {
- return storage.spanStore().getTraces();
- }
-
- /** Retrieves a trace by ID which Zipkin server has received, or null if not present. */
- @Nullable public List getTrace(String traceId) {
- List result;
- try {
- result = storage.traces().getTrace(traceId).execute();
- } catch (IOException e) {
- throw new AssertionError("I/O exception in in-memory storage", e);
- }
- // Note: this is a different behavior than Traces.getTrace() which is not nullable!
- return result.isEmpty() ? null : result;
- }
-
- /** Retrieves all service links between traces this zipkin server has received. */
- public List getDependencies() {
- return storage.spanStore().getDependencies();
- }
-
- /**
- * Used to manually start the server.
- *
- * @param httpPort choose 0 to select an available port
- */
- public void start(int httpPort) throws IOException {
- server.start(httpPort);
- }
-
- /**
- * Used to manually stop the server.
- */
- public void shutdown() throws IOException {
- server.shutdown();
- }
-
- @Override public Statement apply(Statement base, Description description) {
- return server.apply(base, description);
- }
-}
diff --git a/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java b/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java
deleted file mode 100644
index 64809dd7e9b..00000000000
--- a/zipkin-junit/src/test/java/zipkin2/junit/ZipkinRuleTest.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2015-2019 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import okhttp3.MediaType;
-import okhttp3.OkHttpClient;
-import okhttp3.Request;
-import okhttp3.RequestBody;
-import okhttp3.Response;
-import okio.Buffer;
-import okio.ByteString;
-import okio.GzipSink;
-import org.junit.AssumptionViolatedException;
-import org.junit.Rule;
-import org.junit.Test;
-import org.slf4j.bridge.SLF4JBridgeHandler;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown;
-import static zipkin2.TestObjects.CLIENT_SPAN;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-
-public class ZipkinRuleTest {
-
- static {
- // ensure jul-to-slf4j works
- SLF4JBridgeHandler.removeHandlersForRootLogger();
- SLF4JBridgeHandler.install();
- }
-
- @Rule public ZipkinRule zipkin = new ZipkinRule();
-
- List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]);
- OkHttpClient client = new OkHttpClient();
-
- @Test
- public void getTraces_storedViaPost() throws IOException {
- List trace = asList(CLIENT_SPAN);
- // write the span to the zipkin using http
- assertThat(postSpansV1(trace).code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(trace);
- }
-
- @Test
- public void getTraces_storedViaPostVersion2_json() throws IOException {
- getTraces_storedViaPostVersion2("application/json", SpanBytesEncoder.JSON_V2);
- }
-
- @Test
- public void getTraces_storedViaPostVersion2_proto3() throws IOException {
- getTraces_storedViaPostVersion2("application/x-protobuf", SpanBytesEncoder.PROTO3);
- }
-
- void getTraces_storedViaPostVersion2(String mediaType, SpanBytesEncoder encoder)
- throws IOException {
-
- byte[] message = encoder.encodeList(spans);
-
- // write the span to the zipkin using http api v2
- Response response =
- client
- .newCall(
- new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v2/spans")
- .post(RequestBody.create(MediaType.parse(mediaType), message))
- .build())
- .execute();
- assertThat(response.code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(asList(spans.get(0)), asList(spans.get(1)));
- }
-
- /** The rule is here to help debugging. Even partial spans should be returned */
- @Test
- public void getTraces_whenMissingTimestamps() throws IOException {
- Span span = Span.newBuilder().traceId("1").id("1").name("foo").build();
- // write the span to the zipkin using http
- assertThat(postSpansV1(asList(span)).code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(asList(span));
- }
-
- /** The raw query can show affects like redundant rows in the data store. */
- @Test
- public void storeSpans_readbackRaw() {
- Span missingDuration = LOTS_OF_SPANS[0].toBuilder().duration(null).build();
- Span withDuration = LOTS_OF_SPANS[0];
-
- // write the span to zipkin directly
- zipkin.storeSpans(asList(missingDuration));
- zipkin.storeSpans(asList(withDuration));
-
- assertThat(zipkin.getTrace(missingDuration.traceId()))
- .containsExactly(missingDuration, withDuration);
- }
-
- @Test
- public void httpRequestCountIncrements() throws IOException {
- postSpansV1(spans);
- postSpansV1(spans);
-
- assertThat(zipkin.httpRequestCount()).isEqualTo(2);
- }
-
- /**
- * Normally, a span can be reported twice: for client and server. However, there are bugs that
- * happened where several updates went to the same span id. {@link ZipkinRule#collectorMetrics}
- * can be used to help ensure a span isn't reported more times than expected.
- */
- @Test
- public void collectorMetrics_spans() throws IOException {
- postSpansV1(asList(LOTS_OF_SPANS[0]));
- postSpansV1(asList(LOTS_OF_SPANS[1], LOTS_OF_SPANS[2]));
-
- assertThat(zipkin.collectorMetrics().spans()).isEqualTo(3);
- }
-
- @Test
- public void postSpans_disconnectDuringBody() {
- zipkin.enqueueFailure(HttpFailure.disconnectDuringBody());
-
- try {
- postSpansV1(spans);
- failBecauseExceptionWasNotThrown(IOException.class);
- } catch (IOException expected) { // not always a ConnectException!
- }
-
- // Zipkin didn't store the spans, as they shouldn't have been readable, due to disconnect
- assertThat(zipkin.getTraces()).isEmpty();
-
- try {
- // The failure shouldn't affect later requests
- assertThat(postSpansV1(spans).code()).isEqualTo(202);
- } catch (IOException flake) {
- throw new AssumptionViolatedException("test flaked", flake);
- }
- }
-
- @Test
- public void postSpans_sendErrorResponse400() throws IOException {
- zipkin.enqueueFailure(HttpFailure.sendErrorResponse(400, "Invalid Format"));
-
- Response response = postSpansV1(spans);
- assertThat(response.code()).isEqualTo(400);
- assertThat(response.body().string()).isEqualTo("Invalid Format");
-
- // Zipkin didn't store the spans, as they shouldn't have been readable, due to the error
- assertThat(zipkin.getTraces()).isEmpty();
-
- // The failure shouldn't affect later requests
- assertThat(postSpansV1(spans).code()).isEqualTo(202);
- }
-
- @Test
- public void gzippedSpans() throws IOException {
- byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans);
-
- Buffer sink = new Buffer();
- GzipSink gzipSink = new GzipSink(sink);
- gzipSink.write(new Buffer().write(spansInJson), spansInJson.length);
- gzipSink.close();
- ByteString gzippedJson = sink.readByteString();
-
- client.newCall(new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v1/spans")
- .addHeader("Content-Encoding", "gzip")
- .post(RequestBody.create(MediaType.parse("application/json"), gzippedJson))
- .build()).execute();
-
- assertThat(zipkin.collectorMetrics().bytes()).isEqualTo(spansInJson.length);
- }
-
- Response postSpansV1(List spans) throws IOException {
- byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans);
- return client
- .newCall(
- new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v1/spans")
- .post(RequestBody.create(MediaType.parse("application/json"), spansInJson))
- .build())
- .execute();
- }
-}
diff --git a/zipkin-junit/src/test/resources/simplelogger.properties b/zipkin-junit/src/test/resources/simplelogger.properties
deleted file mode 100644
index 3c9471563fb..00000000000
--- a/zipkin-junit/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,9 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
-
-# see MockWebServer connections
-# org.slf4j.simpleLogger.log.okhttp3=info
diff --git a/zipkin-junit5/README.md b/zipkin-junit5/README.md
deleted file mode 100644
index 4feb125c524..00000000000
--- a/zipkin-junit5/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# zipkin-junit5
-
-This contains `ZipkinExtension`, a JUnit5 extension to spin-up a Zipkin server during tests.
-
-ZipkinExtension aims to emulate a http collector. For example, it presents
-the v1 and v2 POST apis [Zipkin Api](http://openzipkin.github.io/zipkin-api/#/), and
-supports features like gzip compression.
-
-Usage
-------
-
-For example, you can write micro-integration tests like so:
-
-```java
-@RegisterExtension
-public ZipkinExtension zipkin = new ZipkinExtension();
-
-// Pretend we have a traced system under test
-TracedService service = new TracedService(zipkin.httpUrl(), ReportingMode.FLUSH_EVERY);
-
-@Test
-public void skipsReportingWhenNotSampled() throws IOException {
- zipkin.storeSpans(asList(rootSpan));
-
- // send a request to the instrumented server, telling it not to sample.
- client.addHeader("X-B3-TraceId", rootSpan.traceId)
- .addHeader("X-B3-SpanId", rootSpan.id)
- .addHeader("X-B3-Sampled", 0).get(service.httpUrl());
-
- // check that zipkin didn't receive any new data in that trace
- assertThat(zipkin.getTraces()).containsOnly(asList(rootSpan));
-}
-```
-
-You can also simulate failures.
-
-For example, if you want to ensure your instrumentation doesn't retry on http 400.
-
-```java
-@Test
-public void doesntAttemptToRetryOn400() throws IOException {
- zipkin.enqueueFailure(sendErrorResponse(400, "Invalid Format"));
-
- reporter.record(span);
- reporter.flush();
-
- // check that we didn't retry on 400
- assertThat(zipkin.httpRequestCount()).isEqualTo(1);
-}
-```
-
-Besides `httpRequestCount()`, there are two other counters that can
-help you assert instrumentation is doing what you think:
-
-* `collectorMetrics()` - How many spans or bytes were collected on the http transport.
-
-These counters can validate aspects such if you are grouping spans by id
-before reporting them to the server.
diff --git a/zipkin-junit5/pom.xml b/zipkin-junit5/pom.xml
deleted file mode 100644
index cb03768ecbc..00000000000
--- a/zipkin-junit5/pom.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-
- 4.0.0
-
-
- io.zipkin
- zipkin-parent
- 2.24.4-SNAPSHOT
-
-
- io.zipkin.zipkin2
- zipkin-junit5
- Zipkin JUnit5
- JUnit5 Extension to spin-up a Zipkin server during tests
-
-
- ${project.basedir}/..
-
-
-
-
- ${project.groupId}
- zipkin
- ${project.version}
-
-
- ${project.groupId}
- zipkin-collector
- ${project.version}
-
-
-
- com.squareup.okhttp3
- mockwebserver3-junit5
- ${okhttp5.version}
-
-
-
- org.junit.jupiter
- junit-jupiter-api
- ${junit-jupiter.version}
-
-
-
- org.slf4j
- slf4j-simple
- ${slf4j.version}
- test
-
-
-
-
- com.google.code.gson
- gson
- ${gson.version}
- test
-
-
- ${project.groupId}
- zipkin-tests
- ${project.version}
- test
-
-
-
diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java b/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java
deleted file mode 100644
index 03871054dac..00000000000
--- a/zipkin-junit5/src/main/java/zipkin2/junit5/HttpFailure.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2015-2023 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit5;
-
-import mockwebserver3.MockResponse;
-
-import static mockwebserver3.SocketPolicy.DISCONNECT_DURING_REQUEST_BODY;
-
-/**
- * Instrumentation that use {@code POST} endpoints need to survive failures. Besides simply not
- * starting the zipkin server, you can enqueue failures like this to test edge cases. For example,
- * that you log a failure when a 400 code is returned.
- */
-public final class HttpFailure {
-
- /** Ex a network partition occurs in the middle of the POST request */
- public static HttpFailure disconnectDuringBody() {
- return new HttpFailure(new MockResponse().setSocketPolicy(DISCONNECT_DURING_REQUEST_BODY));
- }
-
- /** Ex code 400 when the server cannot read the spans */
- public static HttpFailure sendErrorResponse(int code, String body) {
- return new HttpFailure(new MockResponse().setResponseCode(code).setBody(body));
- }
-
- /** Not exposed publicly in order to not leak okhttp3 types. */
- final MockResponse response;
-
- HttpFailure(MockResponse response) {
- this.response = response;
- }
-}
diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java b/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java
deleted file mode 100644
index 8d3d74777bb..00000000000
--- a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinDispatcher.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2015-2023 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit5;
-
-import okhttp3.HttpUrl;
-import mockwebserver3.Dispatcher;
-import mockwebserver3.MockResponse;
-import mockwebserver3.MockWebServer;
-import mockwebserver3.RecordedRequest;
-import okio.Buffer;
-import okio.GzipSource;
-import zipkin2.Callback;
-import zipkin2.codec.SpanBytesDecoder;
-import zipkin2.collector.Collector;
-import zipkin2.collector.CollectorMetrics;
-import zipkin2.storage.StorageComponent;
-
-import java.io.IOException;
-
-final class ZipkinDispatcher extends Dispatcher {
- private final Collector consumer;
- private final CollectorMetrics metrics;
- private final MockWebServer server;
-
- ZipkinDispatcher(StorageComponent storage, CollectorMetrics metrics, MockWebServer server) {
- this.consumer = Collector.newBuilder(getClass()).storage(storage).metrics(metrics).build();
- this.metrics = metrics;
- this.server = server;
- }
-
- @Override
- public MockResponse dispatch(RecordedRequest request) {
- HttpUrl url = server.url(request.getPath());
- if (request.getMethod().equals("POST")) {
- String type = request.getHeader("Content-Type");
- if (url.encodedPath().equals("/api/v1/spans")) {
- SpanBytesDecoder decoder =
- type != null && type.contains("/x-thrift")
- ? SpanBytesDecoder.THRIFT
- : SpanBytesDecoder.JSON_V1;
- return acceptSpans(request, decoder);
- } else if (url.encodedPath().equals("/api/v2/spans")) {
- SpanBytesDecoder decoder =
- type != null && type.contains("/x-protobuf")
- ? SpanBytesDecoder.PROTO3
- : SpanBytesDecoder.JSON_V2;
- return acceptSpans(request, decoder);
- }
- } else { // unsupported method
- return new MockResponse().setResponseCode(405);
- }
- return new MockResponse().setResponseCode(404);
- }
-
- MockResponse acceptSpans(RecordedRequest request, SpanBytesDecoder decoder) {
- byte[] body = request.getBody().readByteArray();
- metrics.incrementMessages();
- String encoding = request.getHeader("Content-Encoding");
- if (encoding != null && encoding.contains("gzip")) {
- try {
- Buffer result = new Buffer();
- GzipSource source = new GzipSource(new Buffer().write(body));
- while (source.read(result, Integer.MAX_VALUE) != -1) ;
- body = result.readByteArray();
- } catch (IOException e) {
- metrics.incrementMessagesDropped();
- return new MockResponse().setResponseCode(400).setBody("Cannot gunzip spans");
- }
- }
- metrics.incrementBytes(body.length);
-
- final MockResponse result = new MockResponse();
- if (body.length == 0) return result.setResponseCode(202); // lenient on empty
-
- consumer.acceptSpans(body, decoder, new Callback() {
- @Override public void onSuccess(Void value) {
- result.setResponseCode(202);
- }
-
- @Override public void onError(Throwable t) {
- String message = t.getMessage();
- result.setBody(message).setResponseCode(message.startsWith("Cannot store") ? 500 : 400);
- }
- });
- return result;
- }
-}
diff --git a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java b/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java
deleted file mode 100644
index dcf87528d51..00000000000
--- a/zipkin-junit5/src/main/java/zipkin2/junit5/ZipkinExtension.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright 2015-2023 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit5;
-
-import mockwebserver3.MockWebServer;
-import mockwebserver3.Dispatcher;
-import mockwebserver3.MockResponse;
-import mockwebserver3.RecordedRequest;
-import org.junit.jupiter.api.extension.AfterEachCallback;
-import org.junit.jupiter.api.extension.BeforeEachCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-import zipkin2.DependencyLink;
-import zipkin2.Span;
-import zipkin2.collector.InMemoryCollectorMetrics;
-import zipkin2.internal.Nullable;
-import zipkin2.storage.InMemoryStorage;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static mockwebserver3.SocketPolicy.KEEP_OPEN;
-
-/**
- * Starts up a local Zipkin server, listening for http requests on {@link #httpUrl}.
- *
- *
This can be used to test instrumentation. For example, you can POST spans directly to this
- * server.
- *
- *
See http://openzipkin.github.io/zipkin-api/#/
- */
-public final class ZipkinExtension implements BeforeEachCallback, AfterEachCallback {
- private final InMemoryStorage storage = InMemoryStorage.newBuilder().build();
- private final InMemoryCollectorMetrics metrics = new InMemoryCollectorMetrics();
- private final MockWebServer server = new MockWebServer();
- private final BlockingQueue failureQueue = new LinkedBlockingQueue<>();
- private final AtomicInteger receivedSpanBytes = new AtomicInteger();
-
- public ZipkinExtension() {
- final ZipkinDispatcher successDispatch = new ZipkinDispatcher(storage, metrics, server);
- Dispatcher dispatcher = new Dispatcher() {
- @Override public MockResponse dispatch(RecordedRequest request) {
- MockResponse maybeFailure = failureQueue.poll();
- if (maybeFailure != null) return maybeFailure;
- MockResponse result = successDispatch.dispatch(request);
- if (request.getMethod().equals("POST")) {
- receivedSpanBytes.addAndGet((int) request.getBodySize());
- }
- return result;
- }
-
- @Override public MockResponse peek() {
- MockResponse maybeFailure = failureQueue.peek();
- if (maybeFailure != null) return maybeFailure.clone();
- return new MockResponse().setSocketPolicy(KEEP_OPEN);
- }
- };
- server.setDispatcher(dispatcher);
- }
-
- /** Use this to connect. The zipkin v1 interface will be under "/api/v1" */
- public String httpUrl() {
- return String.format("http://%s:%s", server.getHostName(), server.getPort());
- }
-
- /** Use this to see how many requests you've sent to any zipkin http endpoint. */
- public int httpRequestCount() {
- return server.getRequestCount();
- }
-
- /** Use this to see how many spans or serialized bytes were collected on the http endpoint. */
- public InMemoryCollectorMetrics collectorMetrics() {
- return metrics;
- }
-
- /**
- * Stores the given spans directly, to setup preconditions for a test.
- *
- *
For example, if you are testing what happens when instrumentation adds a child to a trace,
- * you'd add the parent here.
- */
- public ZipkinExtension storeSpans(List spans) {
- try {
- storage.accept(spans).execute();
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- return this;
- }
-
- /**
- * Adds a one-time failure to the http endpoint.
- *
- *
Ex. If you want to test that you don't repeatedly send bad data, you could send a 400 back.
- *
- *
- *
- * @param failure type of failure the next call to the http endpoint responds with
- */
- public ZipkinExtension enqueueFailure(HttpFailure failure) {
- failureQueue.add(failure.response);
- return this;
- }
-
- /** Retrieves all traces this zipkin server has received. */
- public List> getTraces() {
- return storage.spanStore().getTraces();
- }
-
- /** Retrieves a trace by ID which Zipkin server has received, or null if not present. */
- @Nullable public List getTrace(String traceId) {
- List result;
- try {
- result = storage.traces().getTrace(traceId).execute();
- } catch (IOException e) {
- throw new AssertionError("I/O exception in in-memory storage", e);
- }
- // Note: this is a different behavior than Traces.getTrace() which is not nullable!
- return result.isEmpty() ? null : result;
- }
-
- /** Retrieves all service links between traces this zipkin server has received. */
- public List getDependencies() {
- return storage.spanStore().getDependencies();
- }
-
- /**
- * Used to manually start the server.
- *
- * @param httpPort choose 0 to select an available port
- */
- public void start(int httpPort) throws IOException {
- server.start(httpPort);
- }
-
- /**
- * Used to manually stop the server.
- */
- public void shutdown() throws IOException {
- server.shutdown();
- }
-
- @Override
- public void beforeEach(ExtensionContext extensionContext) throws Exception {
-
- }
-
- @Override
- public void afterEach(ExtensionContext extensionContext) throws Exception {
-
- }
-}
diff --git a/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java b/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java
deleted file mode 100644
index 22891772174..00000000000
--- a/zipkin-junit5/src/test/java/zipkin2/junit5/ZipkinExtensionTest.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2015-2023 The OpenZipkin Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- * in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package zipkin2.junit5;
-
-import okhttp3.MediaType;
-import okhttp3.OkHttpClient;
-import okhttp3.Request;
-import okhttp3.RequestBody;
-import okhttp3.Response;
-import okio.Buffer;
-import okio.ByteString;
-import okio.GzipSink;
-import org.junit.AssumptionViolatedException;
-import org.junit.Test;
-import org.junit.jupiter.api.extension.RegisterExtension;
-import zipkin2.Span;
-import zipkin2.codec.SpanBytesEncoder;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-
-import static java.util.Arrays.asList;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.failBecauseExceptionWasNotThrown;
-import static zipkin2.TestObjects.CLIENT_SPAN;
-import static zipkin2.TestObjects.LOTS_OF_SPANS;
-
-public class ZipkinExtensionTest {
-
- @RegisterExtension
- public ZipkinExtension zipkin = new ZipkinExtension();
-
- List spans = Arrays.asList(LOTS_OF_SPANS[0], LOTS_OF_SPANS[1]);
- OkHttpClient client = new OkHttpClient();
-
- @Test
- public void getTraces_storedViaPost() throws IOException {
- List trace = asList(CLIENT_SPAN);
- // write the span to the zipkin using http
- assertThat(postSpansV1(trace).code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(trace);
- }
-
- @Test
- public void getTraces_storedViaPostVersion2_json() throws IOException {
- getTraces_storedViaPostVersion2("application/json", SpanBytesEncoder.JSON_V2);
- }
-
- @Test
- public void getTraces_storedViaPostVersion2_proto3() throws IOException {
- getTraces_storedViaPostVersion2("application/x-protobuf", SpanBytesEncoder.PROTO3);
- }
-
- void getTraces_storedViaPostVersion2(String mediaType, SpanBytesEncoder encoder)
- throws IOException {
-
- byte[] message = encoder.encodeList(spans);
-
- // write the span to the zipkin using http api v2
- Response response =
- client
- .newCall(
- new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v2/spans")
- .post(RequestBody.create(MediaType.parse(mediaType), message))
- .build())
- .execute();
- assertThat(response.code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(asList(spans.get(0)), asList(spans.get(1)));
- }
-
- /** The rule is here to help debugging. Even partial spans should be returned */
- @Test
- public void getTraces_whenMissingTimestamps() throws IOException {
- Span span = Span.newBuilder().traceId("1").id("1").name("foo").build();
- // write the span to the zipkin using http
- assertThat(postSpansV1(asList(span)).code()).isEqualTo(202);
-
- // read the traces directly
- assertThat(zipkin.getTraces()).containsOnly(asList(span));
- }
-
- /** The raw query can show affects like redundant rows in the data store. */
- @Test
- public void storeSpans_readbackRaw() {
- Span missingDuration = LOTS_OF_SPANS[0].toBuilder().duration(null).build();
- Span withDuration = LOTS_OF_SPANS[0];
-
- // write the span to zipkin directly
- zipkin.storeSpans(asList(missingDuration));
- zipkin.storeSpans(asList(withDuration));
-
- assertThat(zipkin.getTrace(missingDuration.traceId()))
- .containsExactly(missingDuration, withDuration);
- }
-
- @Test
- public void httpRequestCountIncrements() throws IOException {
- postSpansV1(spans);
- postSpansV1(spans);
-
- assertThat(zipkin.httpRequestCount()).isEqualTo(2);
- }
-
- /**
- * Normally, a span can be reported twice: for client and server. However, there are bugs that
- * happened where several updates went to the same span id. {@link ZipkinExtension#collectorMetrics}
- * can be used to help ensure a span isn't reported more times than expected.
- */
- @Test
- public void collectorMetrics_spans() throws IOException {
- postSpansV1(asList(LOTS_OF_SPANS[0]));
- postSpansV1(asList(LOTS_OF_SPANS[1], LOTS_OF_SPANS[2]));
-
- assertThat(zipkin.collectorMetrics().spans()).isEqualTo(3);
- }
-
- @Test
- public void postSpans_disconnectDuringBody() {
- zipkin.enqueueFailure(HttpFailure.disconnectDuringBody());
-
- try {
- postSpansV1(spans);
- failBecauseExceptionWasNotThrown(IOException.class);
- } catch (IOException expected) { // not always a ConnectException!
- }
-
- // Zipkin didn't store the spans, as they shouldn't have been readable, due to disconnect
- assertThat(zipkin.getTraces()).isEmpty();
-
- try {
- // The failure shouldn't affect later requests
- assertThat(postSpansV1(spans).code()).isEqualTo(202);
- } catch (IOException flake) {
- throw new AssumptionViolatedException("test flaked", flake);
- }
- }
-
- @Test
- public void postSpans_sendErrorResponse400() throws IOException {
- zipkin.enqueueFailure(HttpFailure.sendErrorResponse(400, "Invalid Format"));
-
- Response response = postSpansV1(spans);
- assertThat(response.code()).isEqualTo(400);
- assertThat(response.body().string()).isEqualTo("Invalid Format");
-
- // Zipkin didn't store the spans, as they shouldn't have been readable, due to the error
- assertThat(zipkin.getTraces()).isEmpty();
-
- // The failure shouldn't affect later requests
- assertThat(postSpansV1(spans).code()).isEqualTo(202);
- }
-
- @Test
- public void gzippedSpans() throws IOException {
- byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans);
-
- Buffer sink = new Buffer();
- GzipSink gzipSink = new GzipSink(sink);
- gzipSink.write(new Buffer().write(spansInJson), spansInJson.length);
- gzipSink.close();
- ByteString gzippedJson = sink.readByteString();
-
- client.newCall(new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v1/spans")
- .addHeader("Content-Encoding", "gzip")
- .post(RequestBody.create(MediaType.parse("application/json"), gzippedJson))
- .build()).execute();
-
- assertThat(zipkin.collectorMetrics().bytes()).isEqualTo(spansInJson.length);
- }
-
- Response postSpansV1(List spans) throws IOException {
- byte[] spansInJson = SpanBytesEncoder.JSON_V1.encodeList(spans);
- return client
- .newCall(
- new Request.Builder()
- .url(zipkin.httpUrl() + "/api/v1/spans")
- .post(RequestBody.create(MediaType.parse("application/json"), spansInJson))
- .build())
- .execute();
- }
-}
diff --git a/zipkin-junit5/src/test/resources/simplelogger.properties b/zipkin-junit5/src/test/resources/simplelogger.properties
deleted file mode 100644
index 0407cd2f055..00000000000
--- a/zipkin-junit5/src/test/resources/simplelogger.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-# See https://www.slf4j.org/api/org/slf4j/impl/SimpleLogger.html for the full list of config options
-
-org.slf4j.simpleLogger.logFile=System.out
-org.slf4j.simpleLogger.defaultLogLevel=warn
-org.slf4j.simpleLogger.showDateTime=true
-org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS
diff --git a/zipkin-server/pom.xml b/zipkin-server/pom.xml
index eec206e7366..655dfe144d5 100644
--- a/zipkin-server/pom.xml
+++ b/zipkin-server/pom.xml
@@ -16,15 +16,15 @@
-->
4.0.0
-
+ pomio.zipkinzipkin-parent2.24.4-SNAPSHOT
- zipkin-server
- Zipkin Server
+ zipkin-server-parent
+ Zipkin Server Parent${project.basedir}/..
@@ -35,8 +35,30 @@
5.13.22.17.1${project.build.directory}/generated-test-sources/wire
+
+ 9.7.0-SNAPSHOT
+
+
+ ../skywalking/apm-protocol
+ ../skywalking/oap-server-bom
+ ../skywalking/oap-server/server-core
+ ../skywalking/oap-server/server-receiver-plugin/receiver-proto
+ ../skywalking/oap-server/server-receiver-plugin/zipkin-receiver-plugin
+ ../skywalking/oap-server/server-cluster-plugin/cluster-standalone-plugin
+ ../skywalking/oap-server/server-storage-plugin
+ ../skywalking/oap-server/server-library
+ ../skywalking/oap-server/server-query-plugin/zipkin-query-plugin
+ ../skywalking/oap-server/server-telemetry
+ ../skywalking/oap-server/server-testing
+ ../skywalking/oap-server/server-configuration/configuration-api
+ ../skywalking/oap-server/ai-pipeline
+
+ server-core
+ server-starter
+
+
@@ -79,44 +101,6 @@
-
-
- org.springframework.boot
- spring-boot-starter
- ${spring-boot.version}
-
-
- org.springframework.boot
- spring-boot-starter-logging
-
-
-
-
- org.springframework.boot
- spring-boot-starter-actuator
- ${spring-boot.version}
-
-
- org.springframework.boot
- spring-boot-starter-logging
-
-
-
-
-
-
- org.yaml
- snakeyaml
- ${snakeyaml.version}
-
-
-
-
- org.xerial.snappy
- snappy-java
- ${snappy.version}
-
-
org.apache.logging.log4j
@@ -147,70 +131,12 @@
compile
-
-
- org.springframework.boot
- spring-boot-starter-log4j2
- ${spring-boot.version}
-
-
-
-
- ${armeria.groupId}
- armeria-spring-boot2-autoconfigure
- ${armeria.version}
-
-
- ${armeria.groupId}
- armeria-logback
-
-
- javax.validation
- validation-api
-
-
-
-
- ${armeria.groupId}
- armeria-brave
- ${armeria.version}
-
-
- ${armeria.groupId}
- armeria-grpc-protocol
- ${armeria.version}
-
-
-
-
- io.micrometer
- micrometer-registry-prometheus
- ${micrometer.version}
-
-
-
- com.netflix.concurrency-limits
- concurrency-limits-core
- 0.3.6
-
-
- io.micrometer
- micrometer-core
- ${micrometer.version}
-
-
${project.groupId}.zipkin2zipkin${project.version}
-
- ${project.groupId}.zipkin2
- zipkin-collector
- ${project.version}
-
-
org.slf4jslf4j-api
@@ -230,76 +156,6 @@
provided
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-cassandra
- ${project.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-elasticsearch
- ${project.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-mysql-v1
- ${project.version}
- true
-
-
- org.mariadb.jdbc
- mariadb-java-client
- ${mariadb-java-client.version}
- true
-
-
- com.zaxxer
- HikariCP
- ${HikariCP.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-activemq
- ${project.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-kafka
- ${project.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-rabbitmq
- ${project.version}
- true
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-scribe
- ${project.version}
- true
-
-
io.zipkin.brave
@@ -326,13 +182,6 @@
${zipkin-proto3.version}test
-
-
- com.google.code.gson
- gson
- ${gson.version}
- test
- com.squareup.wire
@@ -341,107 +190,15 @@
test
-
- com.squareup.okhttp3
- okhttp
- ${okhttp.version}
- test
-
-
-
- com.squareup.okio
- okio
-
-
-
-
-
- ${armeria.groupId}
- armeria-junit5
- ${armeria.version}
- test
-
-
-
- ${armeria.groupId}
- armeria-junit4
- ${armeria.version}
- test
-
-
-
-
- org.springframework.boot
- spring-boot-test-autoconfigure
- ${spring-boot.version}
-
-
- *
- *
-
-
- test
-
-
- org.springframework.boot
- spring-boot-test
- ${spring-boot.version}
-
-
- *
- *
-
-
- test
-
-
- org.springframework
- spring-test
- ${spring.version}
- test
-
-
${project.groupId}.zipkin2zipkin-tests${project.version}test
-
-
- org.awaitility
- awaitility
- ${awaitility.version}
- test
-
-
-
- com.jayway.jsonpath
- json-path
- 2.4.0
- test
-
-
- actuator
-
-
- !skipActuator
-
-
-
-
-
- ${armeria.groupId}
- armeria-spring-boot2-actuator-autoconfigure
- ${armeria.version}
- true
-
-
- include-lens
@@ -460,204 +217,4 @@
-
-
-
- src/main/resources
- true
-
-
-
-
-
- maven-dependency-plugin
-
-
- com.squareup.wire
- wire-maven-plugin
-
-
- generate-test-sources
-
- generate-sources
-
-
- ${proto.generatedSourceDirectory}
-
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
- 3.2.0
-
-
- add-test-source
- generate-test-sources
-
- add-test-source
-
-
-
-
-
-
-
-
-
-
-
- pl.project13.maven
- git-commit-id-plugin
- ${git-commit-id.version}
-
-
- extract-git-info
-
- revision
-
-
-
-
- false
-
-
-
-
- org.springframework.boot
- spring-boot-maven-plugin
- ${spring-boot.version}
-
- zipkin.server.ZipkinServer
- true
-
-
-
- exec
-
- repackage
-
-
- exec
-
-
- org.slf4j
- slf4j-simple
-
-
-
-
-
-
-
- slim
-
- repackage
-
-
- slim
-
-
- com.google.auto.value,com.google.guava,io.dropwizard.metrics,com.datastax.oss,com.github.jnr,org.ow2.asm,org.jooq,javax.xml.bind,org.mariadb.jdbc,com.zaxxer,org.apache.activemq,org.apache.geronimo.specs,org.fusesource.hawtbuf,org.apache.kafka,com.github.luben,org.lz4,org.xerial.snappy,com.rabbitmq,jakarta.annotation,org.apache.thrift,org.apache.logging.log4j
-
-
-
-
- ${armeria.groupId}
- armeria-spring-boot2-actuator-autoconfigure
-
-
- org.springframework.boot
- spring-boot-actuator-autoconfigure
-
-
- org.springframework.boot
- spring-boot-actuator
-
-
- com.fasterxml.jackson.datatype
- jackson-datatype-jsr310
-
-
-
-
- org.springframework.boot
- spring-boot-starter-log4j2
-
-
-
-
- io.netty
- netty-tcnative-boringssl-static
-
-
- io.netty
- netty-codec-haproxy
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-cassandra
-
-
- io.zipkin.brave.cassandra
- brave-instrumentation-cassandra-driver
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-storage-mysql-v1
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-activemq
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-kafka
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-rabbitmq
-
-
-
-
- ${project.groupId}.zipkin2
- zipkin-collector-scribe
-
-
- ${armeria.groupId}
- armeria-thrift0.15
-
-
-
-
-
-
-
-
diff --git a/zipkin-server/server-core/pom.xml b/zipkin-server/server-core/pom.xml
new file mode 100644
index 00000000000..abb0b4c9fb0
--- /dev/null
+++ b/zipkin-server/server-core/pom.xml
@@ -0,0 +1,24 @@
+
+
+ 4.0.0
+
+
+ zipkin-server-parent
+ io.zipkin
+ 2.24.4-SNAPSHOT
+
+
+ zipkin-server-core
+ Zipkin Server Core
+
+
+
+ org.apache.skywalking
+ server-core
+ ${skywalking.version}
+
+
+
+
\ No newline at end of file
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java
new file mode 100644
index 00000000000..cba2b402576
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleConfig.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core;
+
+import org.apache.skywalking.oap.server.library.module.ModuleConfig;
+
+public class CoreModuleConfig extends ModuleConfig {
+ /**
+ * The max length of the service name.
+ */
+ private int serviceNameMaxLength = 70;
+ /**
+ * The max length of the service instance name.
+ */
+ private int instanceNameMaxLength = 70;
+ /**
+ * The max length of the endpoint name.
+ *
+ *
NOTICE
+ * In the current practice, we don't recommend the length over 190.
+ */
+ private int endpointNameMaxLength = 150;
+ /**
+ * The period of L1 aggregation flush. Unit is ms.
+ */
+ private long l1FlushPeriod = 500;
+ /**
+ * The threshold of session time. Unit is ms. Default value is 70s.
+ */
+ private long storageSessionTimeout = 70_000;
+ /**
+ * The service cache refresh interval, default 10s
+ */
+ private int serviceCacheRefreshInterval = 10;
+ /**
+ * The time to live of all metrics data. Unit is day.
+ */
+ private int metricsDataTTL = 3;
+ /**
+ * The time to live of all record data, including tracing. Unit is Day.
+ */
+ private int recordDataTTL = 7;
+
+ public int getServiceNameMaxLength() {
+ return serviceNameMaxLength;
+ }
+
+ public int getInstanceNameMaxLength() {
+ return instanceNameMaxLength;
+ }
+
+ public int getEndpointNameMaxLength() {
+ return endpointNameMaxLength;
+ }
+
+ public long getL1FlushPeriod() {
+ return l1FlushPeriod;
+ }
+
+ public long getStorageSessionTimeout() {
+ return storageSessionTimeout;
+ }
+
+ public int getServiceCacheRefreshInterval() {
+ return serviceCacheRefreshInterval;
+ }
+
+ public int getMetricsDataTTL() {
+ return metricsDataTTL;
+ }
+
+ public int getRecordDataTTL() {
+ return recordDataTTL;
+ }
+
+ public org.apache.skywalking.oap.server.core.CoreModuleConfig toSkyWalkingConfig() {
+ final org.apache.skywalking.oap.server.core.CoreModuleConfig result = new org.apache.skywalking.oap.server.core.CoreModuleConfig();
+ result.setServiceCacheRefreshInterval(serviceCacheRefreshInterval);
+ return result;
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java
new file mode 100644
index 00000000000..94ddf289fba
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/CoreModuleProvider.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core;
+
+import org.apache.skywalking.oap.server.core.CoreModule;
+import org.apache.skywalking.oap.server.core.analysis.meter.MeterSystem;
+import org.apache.skywalking.oap.server.core.analysis.worker.MetricsStreamProcessor;
+import org.apache.skywalking.oap.server.core.cache.NetworkAddressAliasCache;
+import org.apache.skywalking.oap.server.core.cache.ProfileTaskCache;
+import org.apache.skywalking.oap.server.core.command.CommandService;
+import org.apache.skywalking.oap.server.core.config.ConfigService;
+import org.apache.skywalking.oap.server.core.config.DownSamplingConfigService;
+import org.apache.skywalking.oap.server.core.config.IComponentLibraryCatalogService;
+import org.apache.skywalking.oap.server.core.config.NamingControl;
+import org.apache.skywalking.oap.server.core.config.group.EndpointNameGrouping;
+import org.apache.skywalking.oap.server.core.management.ui.menu.UIMenuManagementService;
+import org.apache.skywalking.oap.server.core.management.ui.template.UITemplateManagementService;
+import org.apache.skywalking.oap.server.core.oal.rt.OALEngineLoaderService;
+import org.apache.skywalking.oap.server.core.profiling.continuous.ContinuousProfilingMutationService;
+import org.apache.skywalking.oap.server.core.profiling.continuous.ContinuousProfilingQueryService;
+import org.apache.skywalking.oap.server.core.profiling.ebpf.EBPFProfilingMutationService;
+import org.apache.skywalking.oap.server.core.profiling.ebpf.EBPFProfilingQueryService;
+import org.apache.skywalking.oap.server.core.profiling.trace.ProfileTaskMutationService;
+import org.apache.skywalking.oap.server.core.profiling.trace.ProfileTaskQueryService;
+import org.apache.skywalking.oap.server.core.query.AggregationQueryService;
+import org.apache.skywalking.oap.server.core.query.AlarmQueryService;
+import org.apache.skywalking.oap.server.core.query.BrowserLogQueryService;
+import org.apache.skywalking.oap.server.core.query.EventQueryService;
+import org.apache.skywalking.oap.server.core.query.LogQueryService;
+import org.apache.skywalking.oap.server.core.query.MetadataQueryService;
+import org.apache.skywalking.oap.server.core.query.MetricsMetadataQueryService;
+import org.apache.skywalking.oap.server.core.query.MetricsQueryService;
+import org.apache.skywalking.oap.server.core.query.RecordQueryService;
+import org.apache.skywalking.oap.server.core.query.TagAutoCompleteQueryService;
+import org.apache.skywalking.oap.server.core.query.TopNRecordsQueryService;
+import org.apache.skywalking.oap.server.core.query.TopologyQueryService;
+import org.apache.skywalking.oap.server.core.query.TraceQueryService;
+import org.apache.skywalking.oap.server.core.remote.RemoteSenderService;
+import org.apache.skywalking.oap.server.core.remote.client.RemoteClientManager;
+import org.apache.skywalking.oap.server.core.server.GRPCHandlerRegister;
+import org.apache.skywalking.oap.server.core.server.HTTPHandlerRegister;
+import org.apache.skywalking.oap.server.core.source.SourceReceiver;
+import org.apache.skywalking.oap.server.core.source.SourceReceiverImpl;
+import org.apache.skywalking.oap.server.core.status.ServerStatusService;
+import org.apache.skywalking.oap.server.core.storage.model.IModelManager;
+import org.apache.skywalking.oap.server.core.storage.model.ModelCreator;
+import org.apache.skywalking.oap.server.core.storage.model.ModelManipulator;
+import org.apache.skywalking.oap.server.core.storage.model.StorageModels;
+import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceGetter;
+import org.apache.skywalking.oap.server.core.worker.IWorkerInstanceSetter;
+import org.apache.skywalking.oap.server.core.worker.WorkerInstancesService;
+import org.apache.skywalking.oap.server.library.module.ModuleConfig;
+import org.apache.skywalking.oap.server.library.module.ModuleDefine;
+import org.apache.skywalking.oap.server.library.module.ModuleProvider;
+import org.apache.skywalking.oap.server.library.module.ModuleStartException;
+import org.apache.skywalking.oap.server.library.module.ServiceNotProvidedException;
+import zipkin.server.core.services.EmptyComponentLibraryCatalogService;
+import zipkin.server.core.services.EmptyGRPCHandlerRegister;
+import zipkin.server.core.services.EmptyHTTPHandlerRegister;
+import zipkin.server.core.services.EmptyNetworkAddressAliasCache;
+import zipkin.server.core.services.ZipkinConfigService;
+
+import java.util.Collections;
+
+public class CoreModuleProvider extends ModuleProvider {
+ private CoreModuleConfig moduleConfig;
+
+ private EndpointNameGrouping endpointNameGrouping;
+ private final SourceReceiverImpl receiver;
+ private final StorageModels storageModels;
+
+ public CoreModuleProvider() {
+ this.receiver = new SourceReceiverImpl();
+ this.storageModels = new StorageModels();
+ }
+
+ @Override
+ public String name() {
+ return "zipkin";
+ }
+
+ @Override
+ public Class<? extends ModuleDefine> module() {
+ return CoreModule.class;
+ }
+
+ @Override
+ public ConfigCreator<? extends ModuleConfig> newConfigCreator() {
+ return new ConfigCreator<CoreModuleConfig>() {
+ @Override
+ public Class<CoreModuleConfig> type() {
+ return CoreModuleConfig.class;
+ }
+
+ @Override
+ public void onInitialized(CoreModuleConfig initialized) {
+ moduleConfig = initialized;
+ }
+ };
+ }
+
+ @Override
+ public void prepare() throws ServiceNotProvidedException, ModuleStartException {
+ endpointNameGrouping = new EndpointNameGrouping();
+ final NamingControl namingControl = new NamingControl(
+ moduleConfig.getServiceNameMaxLength(),
+ moduleConfig.getInstanceNameMaxLength(),
+ moduleConfig.getEndpointNameMaxLength(),
+ endpointNameGrouping
+ );
+ this.registerServiceImplementation(NamingControl.class, namingControl);
+
+ final org.apache.skywalking.oap.server.core.CoreModuleConfig swConfig = this.moduleConfig.toSkyWalkingConfig();
+ this.registerServiceImplementation(MeterSystem.class, new MeterSystem(getManager()));
+ this.registerServiceImplementation(ConfigService.class, new ZipkinConfigService(moduleConfig, this));
+ this.registerServiceImplementation(ServerStatusService.class, new ServerStatusService(getManager()));
+ this.registerServiceImplementation(DownSamplingConfigService.class, new DownSamplingConfigService(Collections.emptyList()));
+ this.registerServiceImplementation(GRPCHandlerRegister.class, new EmptyGRPCHandlerRegister());
+ this.registerServiceImplementation(HTTPHandlerRegister.class, new EmptyHTTPHandlerRegister());
+ this.registerServiceImplementation(IComponentLibraryCatalogService.class, new EmptyComponentLibraryCatalogService());
+ this.registerServiceImplementation(SourceReceiver.class, receiver);
+ final WorkerInstancesService instancesService = new WorkerInstancesService();
+ this.registerServiceImplementation(IWorkerInstanceGetter.class, instancesService);
+ this.registerServiceImplementation(IWorkerInstanceSetter.class, instancesService);
+ this.registerServiceImplementation(RemoteSenderService.class, new RemoteSenderService(getManager()));
+ this.registerServiceImplementation(RemoteSenderService.class, new RemoteSenderService(getManager()));
+ this.registerServiceImplementation(ModelCreator.class, storageModels);
+ this.registerServiceImplementation(IModelManager.class, storageModels);
+ this.registerServiceImplementation(ModelManipulator.class, storageModels);
+ this.registerServiceImplementation(NetworkAddressAliasCache.class, new EmptyNetworkAddressAliasCache());
+ this.registerServiceImplementation(TopologyQueryService.class, new TopologyQueryService(getManager(), storageModels));
+ this.registerServiceImplementation(MetricsMetadataQueryService.class, new MetricsMetadataQueryService());
+ this.registerServiceImplementation(MetricsQueryService.class, new MetricsQueryService(getManager()));
+ this.registerServiceImplementation(TraceQueryService.class, new TraceQueryService(getManager()));
+ this.registerServiceImplementation(BrowserLogQueryService.class, new BrowserLogQueryService(getManager()));
+ this.registerServiceImplementation(LogQueryService.class, new LogQueryService(getManager()));
+ this.registerServiceImplementation(MetadataQueryService.class, new MetadataQueryService(getManager(), swConfig));
+ this.registerServiceImplementation(AggregationQueryService.class, new AggregationQueryService(getManager()));
+ this.registerServiceImplementation(AlarmQueryService.class, new AlarmQueryService(getManager()));
+ this.registerServiceImplementation(TopNRecordsQueryService.class, new TopNRecordsQueryService(getManager()));
+ this.registerServiceImplementation(EventQueryService.class, new EventQueryService(getManager()));
+ this.registerServiceImplementation(TagAutoCompleteQueryService.class, new TagAutoCompleteQueryService(getManager(), swConfig));
+ this.registerServiceImplementation(RecordQueryService.class, new RecordQueryService(getManager()));
+ this.registerServiceImplementation(ProfileTaskMutationService.class, new ProfileTaskMutationService(getManager()));
+ this.registerServiceImplementation(ProfileTaskQueryService.class, new ProfileTaskQueryService(getManager(), swConfig));
+ this.registerServiceImplementation(ProfileTaskCache.class, new ProfileTaskCache(getManager(), swConfig));
+ this.registerServiceImplementation(EBPFProfilingMutationService.class, new EBPFProfilingMutationService(getManager()));
+ this.registerServiceImplementation(EBPFProfilingQueryService.class, new EBPFProfilingQueryService(getManager(), swConfig, this.storageModels));
+ this.registerServiceImplementation(ContinuousProfilingMutationService.class, new ContinuousProfilingMutationService(getManager()));
+ this.registerServiceImplementation(ContinuousProfilingQueryService.class, new ContinuousProfilingQueryService(getManager()));
+ this.registerServiceImplementation(CommandService.class, new CommandService(getManager()));
+ this.registerServiceImplementation(OALEngineLoaderService.class, new OALEngineLoaderService(getManager()));
+ this.registerServiceImplementation(RemoteClientManager.class, new RemoteClientManager(getManager(), 0));
+ this.registerServiceImplementation(UITemplateManagementService.class, new UITemplateManagementService(getManager()));
+ this.registerServiceImplementation(UIMenuManagementService.class, new UIMenuManagementService(getManager(), swConfig));
+
+ if (moduleConfig.getMetricsDataTTL() < 2) {
+ throw new ModuleStartException(
+ "Metric TTL should be at least 2 days, current value is " + moduleConfig.getMetricsDataTTL());
+ }
+ if (moduleConfig.getRecordDataTTL() < 2) {
+ throw new ModuleStartException(
+ "Record TTL should be at least 2 days, current value is " + moduleConfig.getRecordDataTTL());
+ }
+
+ final MetricsStreamProcessor metricsStreamProcessor = MetricsStreamProcessor.getInstance();
+ metricsStreamProcessor.setL1FlushPeriod(moduleConfig.getL1FlushPeriod());
+ metricsStreamProcessor.setStorageSessionTimeout(moduleConfig.getStorageSessionTimeout());
+ metricsStreamProcessor.setMetricsDataTTL(moduleConfig.getMetricsDataTTL());
+ }
+
+ @Override
+ public void start() throws ServiceNotProvidedException, ModuleStartException {
+
+ }
+
+ @Override
+ public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException {
+
+ }
+
+ @Override
+ public String[] requiredModules() {
+ return new String[0];
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java
new file mode 100644
index 00000000000..1212ce5792b
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyComponentLibraryCatalogService.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core.services;
+
+import org.apache.skywalking.oap.server.core.config.IComponentLibraryCatalogService;
+
+public class EmptyComponentLibraryCatalogService implements IComponentLibraryCatalogService {
+ @Override
+ public int getComponentId(String componentName) {
+ return 0;
+ }
+
+ @Override
+ public int getServerIdBasedOnComponent(int componentId) {
+ return 0;
+ }
+
+ @Override
+ public String getComponentName(int componentId) {
+ return null;
+ }
+
+ @Override
+ public String getServerNameBasedOnComponent(int componentId) {
+ return null;
+ }
+
+ @Override
+ public boolean compare(int componentA, int componentB) {
+ return false;
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java
new file mode 100644
index 00000000000..353d14b5d08
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyGRPCHandlerRegister.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core.services;
+
+import io.grpc.BindableService;
+import io.grpc.ServerInterceptor;
+import io.grpc.ServerServiceDefinition;
+import org.apache.skywalking.oap.server.core.server.GRPCHandlerRegister;
+
+public class EmptyGRPCHandlerRegister implements GRPCHandlerRegister {
+ @Override
+ public void addHandler(BindableService handler) {
+ }
+
+ @Override
+ public void addHandler(ServerServiceDefinition definition) {
+ }
+
+ @Override
+ public void addFilter(ServerInterceptor interceptor) {
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java
new file mode 100644
index 00000000000..882e3175463
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyHTTPHandlerRegister.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core.services;
+
+import com.linecorp.armeria.common.HttpMethod;
+import org.apache.skywalking.oap.server.core.server.HTTPHandlerRegister;
+
+import java.util.List;
+
+public class EmptyHTTPHandlerRegister implements HTTPHandlerRegister {
+ @Override
+ public void addHandler(Object httpService, List<HttpMethod> httpMethods) {
+
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java
new file mode 100644
index 00000000000..779e85d2318
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/EmptyNetworkAddressAliasCache.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core.services;
+
+import org.apache.skywalking.oap.server.core.CoreModuleConfig;
+import org.apache.skywalking.oap.server.core.cache.NetworkAddressAliasCache;
+
+public class EmptyNetworkAddressAliasCache extends NetworkAddressAliasCache {
+ public EmptyNetworkAddressAliasCache() {
+ super(new CoreModuleConfig());
+ }
+}
diff --git a/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java
new file mode 100644
index 00000000000..e60ae1d967b
--- /dev/null
+++ b/zipkin-server/server-core/src/main/java/zipkin/server/core/services/ZipkinConfigService.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.core.services;
+
+import org.apache.skywalking.oap.server.core.config.ConfigService;
+import org.apache.skywalking.oap.server.library.module.ModuleProvider;
+import zipkin.server.core.CoreModuleConfig;
+
+public class ZipkinConfigService extends ConfigService {
+
+ public ZipkinConfigService(CoreModuleConfig moduleConfig, ModuleProvider provider) {
+ super(new org.apache.skywalking.oap.server.core.CoreModuleConfig(), provider);
+ }
+}
diff --git a/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider b/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider
new file mode 100755
index 00000000000..1baafe94b46
--- /dev/null
+++ b/zipkin-server/server-core/src/main/resources/META-INF/services/org.apache.skywalking.oap.server.library.module.ModuleProvider
@@ -0,0 +1,19 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+zipkin.server.core.CoreModuleProvider
\ No newline at end of file
diff --git a/zipkin-server/server-starter/pom.xml b/zipkin-server/server-starter/pom.xml
new file mode 100644
index 00000000000..d280d7ee030
--- /dev/null
+++ b/zipkin-server/server-starter/pom.xml
@@ -0,0 +1,242 @@
+
+
+ 4.0.0
+
+
+ zipkin-server-parent
+ io.zipkin
+ 2.24.4-SNAPSHOT
+
+
+ zipkin-server
+ Zipkin Server
+
+
+ ${project.basedir}/src/main/resources/version.properties
+
+
+
+
+
+
+ io.zipkin
+ zipkin-server-core
+ ${project.version}
+
+
+
+
+ org.apache.skywalking
+ cluster-standalone-plugin
+ ${skywalking.version}
+
+
+
+ org.apache.skywalking
+ telemetry-prometheus
+ ${skywalking.version}
+
+
+
+
+ org.apache.skywalking
+ storage-jdbc-hikaricp-plugin
+ ${skywalking.version}
+
+
+ org.apache.skywalking
+ storage-banyandb-plugin
+ ${skywalking.version}
+
+
+ org.apache.skywalking
+ storage-elasticsearch-plugin
+ ${skywalking.version}
+
+
+
+
+ org.apache.skywalking
+ zipkin-receiver-plugin
+ ${skywalking.version}
+
+
+
+
+ org.apache.skywalking
+ zipkin-query-plugin
+ ${skywalking.version}
+
+
+
+
+ org.apache.skywalking
+ library-server
+ ${skywalking.version}
+
+
+
+ com.google.code.gson
+ gson
+ ${gson.version}
+
+
+ org.slf4j
+ slf4j-api
+ ${slf4j.version}
+
+
+
+
+
+
+ src/main/resources
+ true
+
+
+
+
+
+ maven-dependency-plugin
+
+
+ com.squareup.wire
+ wire-maven-plugin
+
+
+ generate-test-sources
+
+ generate-sources
+
+
+ ${proto.generatedSourceDirectory}
+
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+ 3.2.0
+
+
+ add-test-source
+ generate-test-sources
+
+ add-test-source
+
+
+
+
+
+
+
+
+
+
+
+ pl.project13.maven
+ git-commit-id-plugin
+ ${git-commit-id.version}
+
+
+ extract-git-info
+
+ revision
+
+
+
+
+ false
+
+
+
+
+ org.springframework.boot
+ spring-boot-maven-plugin
+ ${spring-boot.version}
+
+ zipkin.server.ZipkinServer
+ true
+
+
+
+ exec
+
+ repackage
+
+
+ exec
+
+
+ org.slf4j
+ slf4j-simple
+
+
+
+
+
+
+ slim
+
+ repackage
+
+
+ slim
+
+
+ com.google.auto.value,io.dropwizard.metrics,com.datastax.oss,com.github.jnr,org.ow2.asm,org.jooq,javax.xml.bind,org.mariadb.jdbc,org.apache.activemq,org.apache.geronimo.specs,org.fusesource.hawtbuf,org.apache.kafka,com.github.luben,org.lz4,org.xerial.snappy,com.rabbitmq,jakarta.annotation,org.apache.thrift,org.apache.skywalking.banyandb,org.postgresql
+
+
+
+ com.fasterxml.jackson.datatype
+ jackson-datatype-jsr310
+
+
+
+
+ io.netty
+ netty-tcnative-boringssl-static
+
+
+ io.netty
+ netty-codec-haproxy
+
+
+
+
+
+
+
+
+ pl.project13.maven
+ git-commit-id-plugin
+ 4.9.10
+
+
+ get-the-git-information
+
+ revision
+
+ initialize
+
+
+
+ false
+ true
+ ${generateGitPropertiesFilename}
+ UTC
+ yyyyMMddHHmmss
+ false
+
+ ^git.build.version$
+ ^git.commit.id$
+
+
+
+
+
+
\ No newline at end of file
diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java
new file mode 100644
index 00000000000..8936165347c
--- /dev/null
+++ b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServer.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server;
+
+public class ZipkinServer {
+ public static void main(String[] args) {
+ ZipkinServerBootstrap.start();
+ }
+}
diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java
new file mode 100644
index 00000000000..ed8027965b7
--- /dev/null
+++ b/zipkin-server/server-starter/src/main/java/zipkin/server/ZipkinServerBootstrap.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server;
+
+import org.apache.skywalking.oap.server.core.CoreModule;
+import org.apache.skywalking.oap.server.core.RunningMode;
+import org.apache.skywalking.oap.server.core.status.ServerStatusService;
+import org.apache.skywalking.oap.server.core.version.Version;
+import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration;
+import org.apache.skywalking.oap.server.library.module.ModuleManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import zipkin.server.config.ApplicationConfigLoader;
+
+/**
+ * Starter core. Load the core configuration file, and initialize the startup sequence through {@link ModuleManager}.
+ */
+public class ZipkinServerBootstrap {
+ private static final Logger log = LoggerFactory.getLogger(ZipkinServerBootstrap.class);
+
+ public static void start() {
+ String mode = System.getProperty("mode");
+ RunningMode.setMode(mode);
+
+ ApplicationConfigLoader configLoader = new ApplicationConfigLoader();
+ ModuleManager manager = new ModuleManager();
+ try {
+ ApplicationConfiguration applicationConfiguration = configLoader.load();
+ manager.init(applicationConfiguration);
+
+ manager.find(CoreModule.NAME)
+ .provider()
+ .getService(ServerStatusService.class)
+ .bootedNow(System.currentTimeMillis());
+
+ log.info("Version of Zipkin: {}", Version.CURRENT);
+
+ if (RunningMode.isInitMode()) {
+ log.info("Zipkin starts up in init mode successfully, exit now...");
+ System.exit(0);
+ }
+ } catch (Throwable t) {
+ log.error(t.getMessage(), t);
+ System.exit(1);
+ }
+ }
+}
diff --git a/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java
new file mode 100644
index 00000000000..6963e3175ae
--- /dev/null
+++ b/zipkin-server/server-starter/src/main/java/zipkin/server/config/ApplicationConfigLoader.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package zipkin.server.config;
+
+import org.apache.skywalking.oap.server.library.module.ApplicationConfiguration;
+import org.apache.skywalking.oap.server.library.module.ProviderNotFoundException;
+import org.apache.skywalking.oap.server.library.util.CollectionUtils;
+import org.apache.skywalking.oap.server.library.util.PropertyPlaceholderHelper;
+import org.apache.skywalking.oap.server.library.util.ResourceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.yaml.snakeyaml.Yaml;
+
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Initializes collector settings from the following sources. application.yml is used as the primary settings file,
+ * and any missing settings are filled in from the defaults in application-default.yml.
+ *
+ * Finally, settings are overridden by system properties and environment variables when the key matches
+ * moduleName.provideName.settingKey.
+ */
+public class ApplicationConfigLoader implements ConfigLoader {
+ static final Logger log = LoggerFactory.getLogger(ApplicationConfigLoader.class.getName());
+
+ // NOTE(review): "-" appears to mark a module's selector as disabled — confirm in selectConfig.
+ private static final String DISABLE_SELECTOR = "-";
+ // Key used in application.yml to choose which provider of a module is active.
+ private static final String SELECTOR = "selector";
+
+ // Shared SnakeYAML instance: loads application.yml and performs value-type
+ // conversion in convertValueString.
+ private final Yaml yaml = new Yaml();
+
+ /**
+ * Builds the complete application configuration: loads application.yml first,
+ * then overrides matching settings from system properties and environment variables.
+ *
+ * @return the populated configuration for {@code ModuleManager.init}
+ * @throws ConfigFileNotFoundException if application.yml cannot be found
+ */
+ @Override
+ public ApplicationConfiguration load() throws ConfigFileNotFoundException {
+ ApplicationConfiguration configuration = new ApplicationConfiguration();
+ this.loadConfig(configuration);
+ this.overrideConfigBySystemEnv(configuration);
+ return configuration;
+ }
+
+ /**
+ * Reads application.yml and registers every module/provider definition found there
+ * into the given configuration. Placeholder values are resolved via
+ * PropertyPlaceholderHelper, and scalar values are converted to their YAML-inferred
+ * types by replacePropertyAndLog.
+ *
+ * @param configuration target to populate with module/provider settings
+ * @throws ConfigFileNotFoundException if application.yml is not on the classpath
+ */
+ @SuppressWarnings("unchecked")
+ private void loadConfig(ApplicationConfiguration configuration) throws ConfigFileNotFoundException {
+ try {
+ Reader applicationReader = ResourceUtils.read("application.yml");
+ // Fix: generic type arguments were stripped ("Map>" does not compile).
+ // Restore the module -> provider -> settings structure.
+ Map<String, Map<String, Object>> moduleConfig = yaml.loadAs(applicationReader, Map.class);
+ if (CollectionUtils.isNotEmpty(moduleConfig)) {
+ selectConfig(moduleConfig);
+ moduleConfig.forEach((moduleName, providerConfig) -> {
+ if (providerConfig.size() > 0) {
+ log.info("Get a module define from application.yml, module name: {}", moduleName);
+ ApplicationConfiguration.ModuleConfiguration moduleConfiguration = configuration.addModule(
+ moduleName);
+ providerConfig.forEach((providerName, config) -> {
+ log.info(
+ "Get a provider define belong to {} module, provider name: {}", moduleName,
+ providerName
+ );
+ // Fix: raw "(Map)" cast lost its type arguments; without them the
+ // forEach below passes Object keys to replacePropertyAndLog(String, ...).
+ final Map<String, ?> propertiesConfig = (Map<String, ?>) config;
+ final Properties properties = new Properties();
+ if (propertiesConfig != null) {
+ propertiesConfig.forEach((propertyName, propertyValue) -> {
+ if (propertyValue instanceof Map) {
+ // Nested mapping: keep it as a nested Properties object.
+ Properties subProperties = new Properties();
+ ((Map<String, ?>) propertyValue).forEach((key, value) -> {
+ subProperties.put(key, value);
+ replacePropertyAndLog(key, value, subProperties, providerName);
+ });
+ properties.put(propertyName, subProperties);
+ } else {
+ properties.put(propertyName, propertyValue);
+ replacePropertyAndLog(propertyName, propertyValue, properties, providerName);
+ }
+ });
+ }
+ moduleConfiguration.addProviderConfiguration(providerName, properties);
+ });
+ } else {
+ log.warn(
+ "Get a module define from application.yml, but no provider define, use default, module name: {}",
+ moduleName
+ );
+ }
+ });
+ }
+ } catch (FileNotFoundException e) {
+ throw new ConfigFileNotFoundException(e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Resolves ${...} placeholders in a single property value and writes the resolved,
+ * type-converted result back into {@code target} under {@code propertyName}.
+ * An all-whitespace resolution is stored as the (empty-ish) string and logged.
+ *
+ * @param propertyName key to replace in {@code target}
+ * @param propertyValue raw value, possibly containing placeholders
+ * @param target properties used both as the placeholder source and the destination
+ * @param providerName provider owning this setting; used only for logging
+ */
+ private void replacePropertyAndLog(final String propertyName, final Object propertyValue, final Properties target,
+ final Object providerName) {
+ final String valueString = PropertyPlaceholderHelper.INSTANCE
+ .replacePlaceholders(String.valueOf(propertyValue), target);
+ if (valueString.trim().length() == 0) {
+ target.replace(propertyName, valueString);
+ log.info("Provider={} config={} has been set as an empty string", providerName, propertyName);
+ } else {
+ // Use YAML to do data type conversion.
+ final Object replaceValue = convertValueString(valueString);
+ // convertValueString only returns null when YAML parses the text to null;
+ // in that case the original value in target is left untouched.
+ if (replaceValue != null) {
+ target.replace(propertyName, replaceValue);
+ }
+ }
+ }
+
+ /**
+ * Converts a resolved string to a typed value by parsing it as YAML.
+ * Only String/Integer/Long/Boolean/ArrayList results are accepted; any other
+ * parse result (e.g. a mapping or a date) falls back to the original string.
+ * Parse failures are logged and also fall back to the original string.
+ *
+ * @param valueString text to convert
+ * @return the typed value, or {@code valueString} when conversion is not applicable
+ */
+ private Object convertValueString(String valueString) {
+ try {
+ Object replaceValue = yaml.load(valueString);
+ if (replaceValue instanceof String || replaceValue instanceof Integer || replaceValue instanceof Long || replaceValue instanceof Boolean || replaceValue instanceof ArrayList) {
+ return replaceValue;
+ } else {
+ return valueString;
+ }
+ } catch (Exception e) {
+ log.warn("yaml convert value type error, use origin values string. valueString={}", valueString, e);
+ return valueString;
+ }
+ }
+
+ private void overrideConfigBySystemEnv(ApplicationConfiguration configuration) {
+ for (Map.Entry